Merge "Avoid holding locks when calling ObjectFree callback"
diff --git a/.vpython b/.vpython
new file mode 100644
index 0000000..ed00723
--- /dev/null
+++ b/.vpython
@@ -0,0 +1,25 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+# vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
diff --git a/Android.bp b/Android.bp
deleted file mode 100644
index 34a6469..0000000
--- a/Android.bp
+++ /dev/null
@@ -1,56 +0,0 @@
-// TODO: These should be handled with transitive static library dependencies
-art_static_dependencies = [
- // Note: the order is important because of static linking resolution.
- "libziparchive",
- "libnativehelper",
- "libnativebridge",
- "libnativeloader",
- "libsigchain_dummy",
- "liblog",
- "libz",
- "libbacktrace",
- "libcutils",
- "libunwindstack",
- "libutils",
- "libbase",
- "liblz4",
- "liblzma",
- "libmetricslogger_static",
-]
-
-subdirs = [
- "adbconnection",
- "benchmark",
- "build",
- "cmdline",
- "compiler",
- "dalvikvm",
- "dex2oat",
- "dexdump",
- "dexlayout",
- "dexlist",
- "dexoptanalyzer",
- "disassembler",
- "dt_fd_forward",
- "dt_fd_forward/export",
- "imgdiag",
- "libartbase",
- "libdexfile",
- "libprofile",
- "oatdump",
- "openjdkjvm",
- "openjdkjvmti",
- "patchoat",
- "profman",
- "runtime",
- "sigchainlib",
- "simulator",
- "test",
- "tools",
- "tools/breakpoint-logger",
- "tools/cpp-define-generator",
- "tools/dmtracedump",
- "tools/hiddenapi",
- "tools/titrace",
- "tools/wrapagentproperties",
-]
diff --git a/Android.mk b/Android.mk
index 7852be5..b9f6170 100644
--- a/Android.mk
+++ b/Android.mk
@@ -98,6 +98,8 @@
include $(art_path)/build/Android.gtest.mk
include $(art_path)/test/Android.run-test.mk
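+# Ensure both gtest and run-test target dependencies are pushed to the device by test-art-target-sync.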
+TEST_ART_TARGET_SYNC_DEPS += $(ART_TEST_TARGET_GTEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES)
+
# Make sure /system is writable on the device.
TEST_ART_ADB_ROOT_AND_REMOUNT := \
($(ADB) root && \
@@ -341,7 +343,6 @@
libart-compiler \
libopenjdkjvm \
libopenjdkjvmti \
- patchoat \
profman \
libadbconnection \
@@ -367,7 +368,6 @@
libopenjdkd \
libopenjdkjvmd \
libopenjdkjvmtid \
- patchoatd \
profmand \
libadbconnectiond \
@@ -427,7 +427,7 @@
define build-art-hiddenapi
$(shell if [ ! -d frameworks/base ]; then \
mkdir -p ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING; \
- touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-{blacklist,dark-greylist,light-greylist}.txt; \
+ touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-{whitelist,blacklist,dark-greylist,light-greylist}.txt; \
fi;)
endef
@@ -451,7 +451,7 @@
# Also include libartbenchmark, we always include it when running golem.
# libstdc++ is needed when building for ART_TARGET_LINUX.
ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so
-build-art-target-golem: dex2oat dalvikvm patchoat linker libstdc++ \
+build-art-target-golem: dex2oat dalvikvm linker libstdc++ \
$(TARGET_OUT_EXECUTABLES)/art \
$(TARGET_OUT)/etc/public.libraries.txt \
$(ART_TARGET_DEX_DEPENDENCIES) \
@@ -486,7 +486,7 @@
build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
.PHONY: build-art-target-tests
-build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
+build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_RUN_TEST_DEPENDENCIES) $(ART_TEST_TARGET_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)
########################################################################
# targets to switch back and forth from libdvm to libart
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 7e492c7..60ad35c 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,5 +1,4 @@
[Hook Scripts]
-check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
check_generated_tests_up_to_date = tools/test_presubmit.py
[Builtin Hooks]
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index ad94148..2050133 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -164,8 +164,8 @@
art::WellKnownClasses::java_lang_Thread_init,
thr_group.get(),
thr_name.get(),
- /*Priority*/ 0,
- /*Daemon*/ true);
+ /*Priority=*/ 0,
+ /*Daemon=*/ true);
}
struct CallbackData {
@@ -251,6 +251,8 @@
runtime->StartThreadBirth();
}
ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
+ // Note: Use pthreads instead of std::thread so that a failure to create the thread does not
+ // abort; std::thread reports such failures via exceptions, which are disabled here.
pthread_t pthread;
std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
started_debugger_threads_ = true;
@@ -268,7 +270,7 @@
runtime->EndThreadBirth();
return;
}
- data.release();
+ data.release(); // NOLINT pthreads API.
}
static bool FlagsSet(int16_t data, int16_t flags) {
@@ -289,7 +291,7 @@
// If the agent isn't loaded we might need to tell ddms code the connection is closed.
if (!agent_loaded_ && notified_ddm_active_) {
- NotifyDdms(/*active*/false);
+ NotifyDdms(/*active=*/false);
}
}
@@ -605,7 +607,7 @@
if (memcmp(kListenStartMessage, buf, sizeof(kListenStartMessage)) == 0) {
agent_listening_ = true;
if (adb_connection_socket_ != -1) {
- SendAgentFds(/*require_handshake*/ !performed_handshake_);
+ SendAgentFds(/*require_handshake=*/ !performed_handshake_);
}
} else if (memcmp(kListenEndMessage, buf, sizeof(kListenEndMessage)) == 0) {
agent_listening_ = false;
@@ -647,7 +649,7 @@
VLOG(jdwp) << "Sending fds as soon as we received them.";
// The agent was already loaded so this must be after a disconnection. Therefore have the
// transport perform the handshake.
- SendAgentFds(/*require_handshake*/ true);
+ SendAgentFds(/*require_handshake=*/ true);
}
} else if (FlagsSet(control_sock_poll.revents, POLLRDHUP)) {
// The other end of the adb connection just dropped it.
@@ -663,7 +665,7 @@
} else if (agent_listening_ && !sent_agent_fds_) {
VLOG(jdwp) << "Sending agent fds again on data.";
// Agent was already loaded so it can deal with the handshake.
- SendAgentFds(/*require_handshake*/ true);
+ SendAgentFds(/*require_handshake=*/ true);
}
} else if (FlagsSet(adb_socket_poll.revents, POLLRDHUP)) {
DCHECK(!agent_has_socket_);
@@ -763,7 +765,7 @@
}
if (!notified_ddm_active_) {
- NotifyDdms(/*active*/ true);
+ NotifyDdms(/*active=*/ true);
}
uint32_t reply_type;
std::vector<uint8_t> reply;
@@ -826,9 +828,9 @@
void AdbConnectionState::AttachJdwpAgent(art::Thread* self) {
art::Runtime* runtime = art::Runtime::Current();
self->AssertNoPendingException();
- runtime->AttachAgent(/* JNIEnv */ nullptr,
+ runtime->AttachAgent(/* env= */ nullptr,
MakeAgentArg(),
- /* classloader */ nullptr);
+ /* class_loader= */ nullptr);
if (self->IsExceptionPending()) {
LOG(ERROR) << "Failed to load agent " << agent_name_;
art::ScopedObjectAccess soa(self);
diff --git a/build/Android.bp b/build/Android.bp
index 62f71ff..47a540d 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -18,16 +18,36 @@
}
art_clang_tidy_errors = [
- // Protect scoped things like MutexLock.
- "bugprone-unused-raii",
+ "bugprone-argument-comment",
+ "bugprone-lambda-function-name",
+ "bugprone-unused-raii", // Protect scoped things like MutexLock.
+ "bugprone-unused-return-value",
+ "bugprone-virtual-near-miss",
+ "modernize-use-bool-literals",
+ "modernize-use-nullptr",
+ "modernize-use-using",
+ "performance-faster-string-find",
"performance-for-range-copy",
+ "performance-implicit-conversion-in-loop",
+ "performance-noexcept-move-constructor",
"performance-unnecessary-copy-initialization",
"performance-unnecessary-value-param",
"misc-unused-using-decls",
]
// Should be: strings.Join(art_clang_tidy_errors, ",").
-art_clang_tidy_errors_str = "bugprone-unused-raii"
+art_clang_tidy_errors_str = "bugprone-argument-comment"
+ + ",bugprone-lambda-function-name"
+ + ",bugprone-unused-raii"
+ + ",bugprone-unused-return-value"
+ + ",bugprone-virtual-near-miss"
+ + ",modernize-redundant-void-arg"
+ + ",modernize-use-bool-literals"
+ + ",modernize-use-nullptr"
+ + ",modernize-use-using"
+ + ",performance-faster-string-find"
+ ",performance-for-range-copy"
+ + ",performance-implicit-conversion-in-loop"
+ + ",performance-noexcept-move-constructor"
+ ",performance-unnecessary-copy-initialization"
+ ",performance-unnecessary-value-param"
+ ",misc-unused-using-decls"
@@ -41,9 +61,11 @@
// We have lots of C-style variadic functions, and are OK with them. JNI ensures
// that working around this warning would be extra-painful.
"-cert-dcl50-cpp",
- // No exceptions.
- "-misc-noexcept-move-constructor",
- "-performance-noexcept-move-constructor",
+ // "Modernization" we don't agree with.
+ "-modernize-use-auto",
+ "-modernize-return-braced-init-list",
+ "-modernize-use-default-member-init",
+ "-modernize-pass-by-value",
]
art_global_defaults {
@@ -88,6 +110,10 @@
"-Wunreachable-code-break",
"-Wunreachable-code-return",
+ // Disable warning for use of offsetof on non-standard layout type.
+ // We use it to implement OFFSETOF_MEMBER - see macros.h.
+ "-Wno-invalid-offsetof",
+
// Enable thread annotations for std::mutex, etc.
"-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
],
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 316ce64..d024e77 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -93,6 +93,6 @@
endif
ADB_EXECUTABLE := $(HOST_OUT_EXECUTABLES)/adb
-ADB := $(ADB_EXECUTABLE)
+ADB ?= $(ADB_EXECUTABLE)
endif # ART_ANDROID_COMMON_MK
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 3247e54..96d3648 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -108,7 +108,6 @@
dexoptanalyzer \
imgdiag \
oatdump \
- patchoat \
profman \
ART_CORE_EXECUTABLES := \
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index c508fe7..17d0232 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -20,11 +20,12 @@
include art/build/Android.common_path.mk
# Directory used for temporary test files on the host.
-# Use a BSD checksum calculated from CWD and USER as one of the path
+# Use a hash calculated from CWD and USER as one of the path
# components for the test output. This should allow us to run tests from
# multiple repositories at the same time.
+# We only take the first few characters to keep paths short.
ART_TMPDIR := $(if $(TMPDIR),$(TMPDIR),/tmp)
-ART_HOST_TEST_DIR := $(ART_TMPDIR)/test-art-$(shell echo $$CWD-${USER} | sum | cut -d ' ' -f1)
+ART_HOST_TEST_DIR := $(ART_TMPDIR)/test-art-$(shell echo $$CWD-${USER} | $(MD5SUM) | cut -c-5)
# List of known broken tests that we won't attempt to execute. The test name must be the full
# rule name such as test-art-host-oat-optimizing-HelloWorld64.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 20f20c9..e2a0a39 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,6 +61,7 @@
StaticLeafMethods \
Statics \
StaticsFromCode \
+ StringLiterals \
Transaction \
XandY
@@ -174,7 +175,7 @@
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dexanalyze_test_DEX_DEPS := MultiDex
ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed StringLiterals
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
@@ -191,11 +192,11 @@
ART_GTEST_oat_test_DEX_DEPS := Main
ART_GTEST_oat_writer_test_DEX_DEPS := Main
ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
-ART_GTEST_patchoat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
ART_GTEST_proxy_test_DEX_DEPS := Interfaces
ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
+ART_GTEST_profiling_info_test_DEX_DEPS := ProfileTestMultiDex
ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
ART_GTEST_stub_test_DEX_DEPS := AllFields
ART_GTEST_transaction_test_DEX_DEPS := Transaction
@@ -214,14 +215,12 @@
$(HOST_CORE_IMAGE_optimizing_64) \
$(HOST_CORE_IMAGE_optimizing_32) \
$(HOST_CORE_IMAGE_interpreter_64) \
- $(HOST_CORE_IMAGE_interpreter_32) \
- patchoatd-host
+ $(HOST_CORE_IMAGE_interpreter_32)
ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_optimizing_64) \
$(TARGET_CORE_IMAGE_optimizing_32) \
$(TARGET_CORE_IMAGE_interpreter_64) \
- $(TARGET_CORE_IMAGE_interpreter_32) \
- patchoatd-target
+ $(TARGET_CORE_IMAGE_interpreter_32)
ART_GTEST_oat_file_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -235,10 +234,10 @@
ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
- dexoptanalyzerd-host
+ $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd
ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
- dexoptanalyzerd-target
+ $(TARGET_OUT_EXECUTABLES)/dexoptanalyzerd
ART_GTEST_image_space_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -247,68 +246,68 @@
ART_GTEST_dex2oat_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
- dex2oatd-host
+ $(HOST_OUT_EXECUTABLES)/dex2oatd
ART_GTEST_dex2oat_test_TARGET_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
- dex2oatd-target
+ $(TARGET_OUT_EXECUTABLES)/dex2oatd
ART_GTEST_dex2oat_image_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
- dex2oatd-host
+ $(HOST_OUT_EXECUTABLES)/dex2oatd
ART_GTEST_dex2oat_image_test_TARGET_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
- dex2oatd-target
+ $(TARGET_OUT_EXECUTABLES)/dex2oatd
# TODO: document why this is needed.
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
# The dexdiag test requires the dexdiag utility.
-ART_GTEST_dexdiag_test_HOST_DEPS := dexdiag-host
-ART_GTEST_dexdiag_test_TARGET_DEPS := dexdiag-target
+ART_GTEST_dexdiag_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/dexdiag
+ART_GTEST_dexdiag_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/dexdiag
# The dexdump test requires an image and the dexdump utility.
# TODO: rename into dexdump when migration completes
ART_GTEST_dexdump_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- dexdump2-host
+ $(HOST_OUT_EXECUTABLES)/dexdump2
ART_GTEST_dexdump_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexdump2-target
+ $(TARGET_OUT_EXECUTABLES)/dexdump2
# The dexanalyze test requires an image and the dexanalyze utility.
ART_GTEST_dexanalyze_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- dexanalyze-host
+ $(HOST_OUT_EXECUTABLES)/dexanalyze
ART_GTEST_dexanalyze_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexanalyze-target
+ $(TARGET_OUT_EXECUTABLES)/dexanalyze
# The dexlayout test requires an image and the dexlayout utility.
# TODO: rename into dexdump when migration completes
ART_GTEST_dexlayout_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- dexlayoutd-host \
- dexdump2-host
+ $(HOST_OUT_EXECUTABLES)/dexlayoutd \
+ $(HOST_OUT_EXECUTABLES)/dexdump2
ART_GTEST_dexlayout_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexlayoutd-target \
- dexdump2-target
+ $(TARGET_OUT_EXECUTABLES)/dexlayoutd \
+ $(TARGET_OUT_EXECUTABLES)/dexdump2
# The dexlist test requires an image and the dexlist utility.
ART_GTEST_dexlist_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- dexlist-host
+ $(HOST_OUT_EXECUTABLES)/dexlist
ART_GTEST_dexlist_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- dexlist-target
+ $(TARGET_OUT_EXECUTABLES)/dexlist
# The imgdiag test has dependencies on core.oat since it needs to load it during the test.
# For the host, also add the installed tool (in the base size, that should suffice). For the
@@ -316,51 +315,46 @@
ART_GTEST_imgdiag_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- imgdiagd-host
+ $(HOST_OUT_EXECUTABLES)/imgdiagd
ART_GTEST_imgdiag_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- imgdiagd-target
+ $(TARGET_OUT_EXECUTABLES)/imgdiagd
# Dex analyze test requires dexanalyze.
ART_GTEST_dexanalyze_test_HOST_DEPS := \
- dexanalyze-host
+ $(HOST_OUT_EXECUTABLES)/dexanalyze
ART_GTEST_dexanalyze_test_TARGET_DEPS := \
- dexanalyze-target
+ $(TARGET_OUT_EXECUTABLES)/dexanalyze
# Oatdump test requires an image and oatfile to dump.
ART_GTEST_oatdump_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- oatdumpd-host \
- oatdumpds-host \
- dexdump2-host
+ $(HOST_OUT_EXECUTABLES)/oatdumpd \
+ $(HOST_OUT_EXECUTABLES)/oatdumpds \
+ $(HOST_OUT_EXECUTABLES)/dexdump2
ART_GTEST_oatdump_test_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_DEFAULT_64) \
$(TARGET_CORE_IMAGE_DEFAULT_32) \
- oatdumpd-target \
- dexdump2-target
+ $(TARGET_OUT_EXECUTABLES)/oatdumpd \
+ $(TARGET_OUT_EXECUTABLES)/dexdump2
ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) \
- dex2oatd-host \
- dex2oatds-host
+ $(HOST_OUT_EXECUTABLES)/dex2oatd \
+ $(HOST_OUT_EXECUTABLES)/dex2oatds
ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) \
- dex2oatd-target
-
-ART_GTEST_patchoat_test_HOST_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
-ART_GTEST_patchoat_test_TARGET_DEPS := \
- $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+ $(TARGET_OUT_EXECUTABLES)/dex2oatd
# Profile assistant tests requires profman utility.
-ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
-ART_GTEST_profile_assistant_test_TARGET_DEPS := profmand-target
+ART_GTEST_profile_assistant_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/profmand
+ART_GTEST_profile_assistant_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/profmand
ART_GTEST_hiddenapi_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_DEFAULT_64) \
$(HOST_CORE_IMAGE_DEFAULT_32) \
- hiddenapid-host
+ $(HOST_OUT_EXECUTABLES)/hiddenapid
# The path that all the source files are relative to; not actually the current directory.
LOCAL_PATH := art
@@ -382,7 +376,6 @@
art_libdexfile_tests \
art_libprofile_tests \
art_oatdump_tests \
- art_patchoat_tests \
art_profman_tests \
art_runtime_tests \
art_runtime_compiler_tests \
@@ -412,6 +405,7 @@
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
ART_TEST_HOST_GTEST_DEPENDENCIES :=
+ART_TEST_TARGET_GTEST_DEPENDENCIES :=
ART_GTEST_TARGET_ANDROID_ROOT := '/system'
ifneq ($(ART_TEST_ANDROID_ROOT),)
@@ -440,7 +434,7 @@
# Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
# to ensure files are pushed to the device.
- TEST_ART_TARGET_SYNC_DEPS += \
+ gtest_deps := \
$$(ART_GTEST_$(1)_TARGET_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
$$(gtest_exe) \
@@ -450,6 +444,8 @@
$$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+ ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
+
$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
$$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
@@ -481,6 +477,7 @@
maybe_chroot_command :=
maybe_art_test_chroot :=
gtest_target_exe :=
+ gtest_deps :=
gtest_exe :=
gtest_rule :=
endef # define-art-gtest-rule-target
@@ -490,7 +487,10 @@
# $(2): path relative to $OUT to the test binary
# $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
define define-art-gtest-rule-host
- gtest_rule := test-art-host-gtest-$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX)
+ gtest_suffix := $(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX)
+ gtest_rule := test-art-host-gtest-$$(gtest_suffix)
+ gtest_output := $(call intermediates-dir-for,PACKAGING,art-host-gtest,HOST)/$$(gtest_suffix).xml
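+ # Copy the gtest XML output into $(DIST_DIR)/gtest/ when a dist build is requested.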
+ $$(call dist-for-goals,$$(gtest_rule),$$(gtest_output):gtest/$$(gtest_suffix))
gtest_exe := $(OUT_DIR)/$(2)
# Dependencies for all host gtests.
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
@@ -499,20 +499,20 @@
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
$(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
- ifneq (,$(DIST_DIR))
- gtest_xml_output := --gtest_output=xml:$(DIST_DIR)/gtest/$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX).xml
- else
- gtest_xml_output :=
- endif
ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
.PHONY: $$(gtest_rule)
+$$(gtest_rule): $$(gtest_output)
+
+# Re-run the tests, even if nothing changed. Until the build system has a dedicated "no cache"
+# option, claim to write a file that is never produced.
+$$(gtest_output): .KATI_IMPLICIT_OUTPUTS := $$(gtest_output)-nocache
+$$(gtest_output): NAME := $$(gtest_rule)
ifeq (,$(SANITIZE_HOST))
-$$(gtest_rule): PRIVATE_XML_OUTPUT := $$(gtest_xml_output)
-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< $$(PRIVATE_XML_OUTPUT) && \
- $$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@)
+$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
+ $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && $$< --gtest_output=xml:$$@ && \
+ $$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
else
# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
# build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so
@@ -521,14 +521,13 @@
# (with the x86-64 ABI, as this allows symbolization of both x86 and x86-64). We don't do this in
# general as it loses all the color output, and we have our own symbolization step when not running
# under ASAN.
-$$(gtest_rule): PRIVATE_XML_OUTPUT := $$(gtest_xml_output)
-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && set -o pipefail && \
- ASAN_OPTIONS=detect_leaks=1 $$< $$(PRIVATE_XML_OUTPUT) 2>&1 | tee $$<.tmp.out >&2 && \
- { $$(call ART_TEST_PASSED,$$@) ; rm $$<.tmp.out ; }) || \
+$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
+ $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
+ ASAN_OPTIONS=detect_leaks=1 $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+ { $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
{ echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
- rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$@))
+ rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$(NAME)))
endif
ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
@@ -539,7 +538,9 @@
# Clear locally defined variables.
gtest_deps :=
gtest_exe :=
+ gtest_output :=
gtest_rule :=
+ gtest_suffix :=
endef # define-art-gtest-rule-host
# Define the rules to build and run host and target gtests.
@@ -733,9 +734,6 @@
ART_GTEST_dex2oat_image_test_HOST_DEPS :=
ART_GTEST_dex2oat_image_test_TARGET_DEPS :=
ART_GTEST_object_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_DEX_DEPS :=
-ART_GTEST_patchoat_test_HOST_DEPS :=
-ART_GTEST_patchoat_test_TARGET_DEPS :=
ART_GTEST_proxy_test_DEX_DEPS :=
ART_GTEST_reflection_test_DEX_DEPS :=
ART_GTEST_stub_test_DEX_DEPS :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index c4ae593..e2adac1 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,7 +39,6 @@
# Use dex2oat debug version for better error reporting
# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
# $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# $(3): multi-image.
# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
# run-test --no-image
define create-core-oat-host-rules
@@ -66,25 +65,14 @@
$$(error found $(1) expected interpreter, interp-ac, or optimizing)
endif
- # If $(3) is true, generate a multi-image.
- ifeq ($(3),true)
- core_multi_infix := -multi
- core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar
- core_multi_group := _multi
- else
- core_multi_infix :=
- core_multi_param :=
- core_multi_group :=
- endif
-
- core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_IMG_SUFFIX)
- core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_OAT_SUFFIX)
+ core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
# Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
ifeq ($(2),)
- HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
+ HOST_CORE_IMAGE_$(1)_64 := $$(core_image_name)
else
- HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
+ HOST_CORE_IMAGE_$(1)_32 := $$(core_image_name)
endif
HOST_CORE_IMG_OUTS += $$(core_image_name)
HOST_CORE_OAT_OUTS += $$(core_oat_name)
@@ -92,7 +80,6 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param)
$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@"
@mkdir -p $$(dir $$@)
@@ -104,9 +91,10 @@
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
$$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
--host --android-root=$$(HOST_OUT) \
- --generate-debug-info --generate-build-id --compile-pic \
+ --generate-debug-info --generate-build-id \
--runtime-arg -XX:SlowDebug=true \
- $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS)
+ --no-inline-from=core-oj-hostdex.jar \
+ $$(PRIVATE_CORE_COMPILE_OPTIONS)
$$(core_oat_name): $$(core_image_name)
@@ -119,21 +107,17 @@
endef # create-core-oat-host-rules
# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
-# $(2): multi-image.
define create-core-oat-host-rule-combination
- $(call create-core-oat-host-rules,$(1),,$(2))
+ $(call create-core-oat-host-rules,$(1),)
ifneq ($(HOST_PREFER_32_BIT),true)
- $(call create-core-oat-host-rules,$(1),2ND_,$(2))
+ $(call create-core-oat-host-rules,$(1),2ND_)
endif
endef
-$(eval $(call create-core-oat-host-rule-combination,optimizing,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,false))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,true))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,true))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,true))
+$(eval $(call create-core-oat-host-rule-combination,optimizing))
+$(eval $(call create-core-oat-host-rule-combination,interpreter))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac))
.PHONY: test-art-host-dex2oat-host
test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
@@ -197,7 +181,7 @@
--instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$$(PRODUCT_OUT)/system \
- --generate-debug-info --generate-build-id --compile-pic \
+ --generate-debug-info --generate-build-id \
--runtime-arg -XX:SlowDebug=true \
$$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
diff --git a/build/art.go b/build/art.go
index 61b1a4e..0df46b1 100644
--- a/build/art.go
+++ b/build/art.go
@@ -66,7 +66,7 @@
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
}
- if envTrue(ctx, "ART_USE_GENERATIONAL_CC") {
+ if !envFalse(ctx, "ART_USE_GENERATIONAL_CC") {
cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
}
@@ -154,7 +154,8 @@
if len(ctx.AConfig().SanitizeHost()) > 0 {
// art/test/137-cfi/cfi.cc
// error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess'
- hostFrameSizeLimit = 6400
+ // error: stack frame size of 6520 bytes in function 'art::interpreter::ExecuteSwitchImplCpp'
+ hostFrameSizeLimit = 7400
}
cflags = append(cflags,
fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit),
@@ -283,6 +284,7 @@
android.RegisterModuleType("art_cc_test_library", artTestLibrary)
android.RegisterModuleType("art_cc_defaults", artDefaultsFactory)
android.RegisterModuleType("libart_cc_defaults", libartDefaultsFactory)
+ android.RegisterModuleType("libart_static_cc_defaults", libartStaticDefaultsFactory)
android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
}
@@ -336,6 +338,33 @@
return module
}
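+// libartStaticDefaultsFactory provides cc_defaults for statically linked ART binaries; on Android
+// targets it additionally links libmetricslogger_static.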
+func libartStaticDefaultsFactory() android.Module {
+ c := &codegenProperties{}
+ module := cc.DefaultsFactory(c)
+ android.AddLoadHook(module, func(ctx android.LoadHookContext) {
+ codegen(ctx, c, true)
+
+ type props struct {
+ Target struct {
+ Android struct {
+ Static_libs []string
+ }
+ }
+ }
+
+ p := &props{}
+ // TODO: express this in .bp instead b/79671158
+ if !envTrue(ctx, "ART_TARGET_LINUX") {
+ p.Target.Android.Static_libs = []string{
+ "libmetricslogger_static",
+ }
+ }
+ ctx.AppendProperties(p)
+ })
+
+ return module
+}
+
func artLibrary() android.Module {
m, _ := cc.NewLibrary(android.HostAndDeviceSupported)
module := m.Init()
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 42c6a5f..97daafa 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -131,7 +131,7 @@
art::InitLogging(nullptr, art::Runtime::Abort); // argv = null
}
- virtual void SetUp() {
+ void SetUp() override {
parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options
}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index c365537..c2f8e3c 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -191,6 +191,15 @@
export_include_dirs: ["."],
}
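+// Static library dependencies shared by the libart-compiler and libartd-compiler static defaults below.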
+cc_defaults {
+ name: "libart-compiler_static_base_defaults",
+ static_libs: [
+ "libbase",
+ "libcutils",
+ "liblzma",
+ ],
+}
+
gensrcs {
name: "art_compiler_operator_srcs",
cmd: "$(location generate_operator_out) art/compiler $(in) > $(out)",
@@ -260,6 +269,18 @@
},
}
+cc_defaults {
+ name: "libart-compiler_static_defaults",
+ defaults: [
+ "libart-compiler_static_base_defaults",
+ "libart_static_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ ],
+ static_libs: ["libart-compiler"],
+}
+
art_cc_library {
name: "libartd-compiler",
defaults: [
@@ -302,6 +323,18 @@
],
}
+cc_defaults {
+ name: "libartd-compiler_static_defaults",
+ defaults: [
+ "libart-compiler_static_base_defaults",
+ "libartd_static_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ ],
+ static_libs: ["libartd-compiler"],
+}
+
art_cc_library {
name: "libart-compiler-gtest",
defaults: ["libart-gtest-defaults"],
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index d603d96..586891a 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -108,7 +108,7 @@
int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
CHECK_EQ(result, 0);
- FlushInstructionCache(reinterpret_cast<char*>(base), reinterpret_cast<char*>(base + len));
+ FlushInstructionCache(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len));
}
void CommonCompilerTest::MakeExecutable(ObjPtr<mirror::ClassLoader> class_loader,
diff --git a/compiler/compiler.h b/compiler/compiler.h
index ef3d87f..8c07773 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -71,6 +71,7 @@
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
+ bool baseline ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED,
jit::JitLogger* jit_logger ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index bda7108..fe05992 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -41,26 +41,14 @@
namespace art {
namespace debug {
-typedef std::vector<DexFile::LocalInfo> LocalInfos;
-
-static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
- static_cast<LocalInfos*>(ctx)->push_back(entry);
-}
-
static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
+ DCHECK(mi->dex_file != nullptr);
CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
if (accessor.HasCodeItem()) {
- DCHECK(mi->dex_file != nullptr);
- const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
- if (stream != nullptr) {
- DecodeUnsignedLeb128(&stream); // line.
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- uint32_t id = DecodeUnsignedLeb128P1(&stream);
- names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id)));
- }
- }
+ accessor.VisitParameterNames([&](const dex::StringIndex& id) {
+ names.push_back(mi->dex_file->StringDataByIdx(id));
+ });
}
return names;
}
@@ -257,11 +245,12 @@
}
// Write local variables.
- LocalInfos local_infos;
+ std::vector<DexFile::LocalInfo> local_infos;
if (accessor.DecodeDebugLocalInfo(is_static,
mi->dex_method_index,
- LocalInfoCallback,
- &local_infos)) {
+ [&](const DexFile::LocalInfo& entry) {
+ local_infos.push_back(entry);
+ })) {
for (const DexFile::LocalInfo& var : local_infos) {
if (var.reg_ < accessor.RegistersSize() - accessor.InsSize()) {
info_.StartTag(DW_TAG_variable);
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 3d78943..0a13a92 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -34,11 +34,6 @@
typedef std::vector<DexFile::PositionInfo> PositionInfos;
-static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
- static_cast<PositionInfos*>(ctx)->push_back(entry);
- return false;
-}
-
template<typename ElfTypes>
class ElfDebugLineWriter {
using Elf_Addr = typename ElfTypes::Addr;
@@ -154,11 +149,14 @@
Elf_Addr method_address = base_address + mi->code_address;
PositionInfos dex2line_map;
- DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
+ DCHECK(dex != nullptr);
CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
- const uint32_t debug_info_offset = accessor.DebugInfoOffset();
- if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) {
+ if (!accessor.DecodeDebugPositionInfo(
+ [&](const DexFile::PositionInfo& entry) {
+ dex2line_map.push_back(entry);
+ return false;
+ })) {
continue;
}
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index fe8b766..183173b 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -41,7 +41,7 @@
class Matcher {
public:
// Match function type.
- typedef bool MatchFn(Matcher* matcher);
+ using MatchFn = bool(Matcher*);
template <size_t size>
static bool Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]);
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index b7117bd..e92b67a 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,11 +38,6 @@
void ClassRejected(ClassReference ref) override;
- // We are running in an environment where we can call patchoat safely so we should.
- bool IsRelocationPossible() override {
- return true;
- }
-
verifier::VerifierDeps* GetVerifierDeps() const override {
return verifier_deps_.get();
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1e0b94d..dd947d9 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -79,7 +79,7 @@
if (inserted) {
// Successfully added, release the unique_ptr since we no longer have ownership.
DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get());
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
} else {
// TODO: Investigate why we are doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
@@ -117,7 +117,7 @@
/*expected*/ nullptr,
verified_method.get()) ==
AtomicMap::InsertResult::kInsertResultSuccess) {
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 21975de..89ac308 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -708,9 +708,9 @@
}
}
-static void ResolveConstStrings(CompilerDriver* driver,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ TimingLogger* timings) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -721,12 +721,18 @@
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
for (ClassAccessor accessor : dex_file->GetClasses()) {
- if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
+ if (!IsClassToCompile(accessor.GetDescriptor())) {
// Compilation is skipped, do not resolve const-string in code of this class.
// FIXME: Make sure that inlining honors this. b/26687569
continue;
}
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
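+ // When resolving only startup strings, skip methods that the profile does not mark as startup.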
+ if (only_startup_strings &&
+ profile_compilation_info_ != nullptr &&
+ !profile_compilation_info_->GetMethodHotness(method.GetReference()).IsStartup()) {
+ continue;
+ }
+
// Resolve const-strings in the code. Done to have deterministic allocation behavior. Right
// now this is single-threaded for simplicity.
// TODO: Collect the relevant string indices in parallel, then allocate them sequentially
@@ -897,8 +903,10 @@
if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
// Resolve strings from const-string. Do this now to have a deterministic image.
- ResolveConstStrings(this, dex_files, timings);
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
+ } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
}
Verify(class_loader, dex_files, timings);
@@ -1886,7 +1894,9 @@
class VerifyClassVisitor : public CompilationVisitor {
public:
VerifyClassVisitor(const ParallelCompilationManager* manager, verifier::HardFailLogMode log_level)
- : manager_(manager), log_level_(log_level) {}
+ : manager_(manager),
+ log_level_(log_level),
+ sdk_version_(Runtime::Current()->GetTargetSdkVersion()) {}
void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) override {
ScopedTrace trace(__FUNCTION__);
@@ -1923,6 +1933,7 @@
Runtime::Current()->GetCompilerCallbacks(),
true /* allow soft failures */,
log_level_,
+ sdk_version_,
&error_msg);
if (failure_kind == verifier::FailureKind::kHardFailure) {
LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
@@ -1995,6 +2006,7 @@
private:
const ParallelCompilationManager* const manager_;
const verifier::HardFailLogMode log_level_;
+ const uint32_t sdk_version_;
};
void CompilerDriver::VerifyDexFile(jobject class_loader,
@@ -2153,10 +2165,9 @@
// Otherwise it's in app image but superclasses can't be initialized, no need to proceed.
old_status = klass->GetStatus();
- bool too_many_encoded_fields = false;
- if (!is_boot_image && klass->NumStaticFields() > kMaxEncodedFields) {
- too_many_encoded_fields = true;
- }
+ bool too_many_encoded_fields = !is_boot_image &&
+ klass->NumStaticFields() > kMaxEncodedFields;
+
// If the class was not initialized, we can proceed to see if we can initialize static
// fields. Limit the max number of encoded fields.
if (!klass->IsInitialized() &&
@@ -2206,9 +2217,13 @@
if (success) {
runtime->ExitTransactionMode();
DCHECK(!runtime->IsActiveTransaction());
- }
- if (!success) {
+ if (is_boot_image) {
+ // For boot image, we want to put the updated status in the oat class since we
+ // can't reject the image anyways.
+ old_status = klass->GetStatus();
+ }
+ } else {
CHECK(soa.Self()->IsExceptionPending());
mirror::Throwable* exception = soa.Self()->GetException();
VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
@@ -2222,10 +2237,6 @@
soa.Self()->ClearException();
runtime->RollbackAllTransactions();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
- } else if (is_boot_image) {
- // For boot image, we want to put the updated status in the oat class since we can't
- // reject the image anyways.
- old_status = klass->GetStatus();
}
}
@@ -2560,8 +2571,8 @@
thread_pool);
auto compile = [&context, &compile_fn](size_t class_def_index) {
- ScopedTrace trace(__FUNCTION__);
const DexFile& dex_file = *context.GetDexFile();
+ SCOPED_TRACE << "compile " << dex_file.GetLocation() << "@" << class_def_index;
ClassLinker* class_linker = context.GetClassLinker();
jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
@@ -2643,7 +2654,7 @@
LOG(INFO) << "[ProfileGuidedCompilation] " <<
((profile_compilation_info_ == nullptr)
? "null"
- : profile_compilation_info_->DumpInfo(&dex_files));
+ : profile_compilation_info_->DumpInfo(dex_files));
}
dex_to_dex_compiler_.ClearState();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 343f67c..9a83e55 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -430,6 +430,12 @@
typedef AtomicDexRefMap<MethodReference, CompiledMethod*> MethodTable;
private:
+ // Resolve const-string literals that are loaded from dex code. If only_startup_strings is
+ // true, only strings referenced from methods marked as startup in the profile are resolved.
+ void ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ /*inout*/ TimingLogger* timings);
+
// All method references that this compiler has compiled.
MethodTable compiled_methods_;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 8cc6cf1..6b0e456 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -47,6 +47,7 @@
boot_image_(false),
core_image_(false),
app_image_(false),
+ baseline_(false),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
@@ -68,6 +69,7 @@
force_determinism_(false),
deduplicate_code_(true),
count_hotness_in_compiled_code_(false),
+ resolve_startup_const_strings_(false),
register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
passes_to_run_(nullptr) {
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 34aceba..4a6bbfa 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -193,6 +193,10 @@
return boot_image_;
}
+ bool IsBaseline() const {
+ return baseline_;
+ }
+
// Are we compiling a core image (small boot image only used for ART testing)?
bool IsCoreImage() const {
// Ensure that `core_image_` => `boot_image_`.
@@ -309,6 +313,10 @@
return count_hotness_in_compiled_code_;
}
+ bool ResolveStartupConstStrings() const {
+ return resolve_startup_const_strings_;
+ }
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -346,6 +354,7 @@
bool boot_image_;
bool core_image_;
bool app_image_;
+ bool baseline_;
bool debuggable_;
bool generate_debug_info_;
bool generate_mini_debug_info_;
@@ -387,6 +396,10 @@
// won't be atomic for performance reasons, so we accept races, just like in interpreter.
bool count_hotness_in_compiled_code_;
+ // Whether we eagerly resolve all of the const strings that are loaded from startup methods in the
+ // profile.
+ bool resolve_startup_const_strings_;
+
RegisterAllocator::Strategy register_allocation_strategy_;
// If not null, specifies optimization passes which will be run instead of defaults.
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 32fc887..5a84495 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -43,9 +43,6 @@
}
options->SetCompilerFilter(compiler_filter);
}
- if (map.Exists(Base::PIC)) {
- options->compile_pic_ = true;
- }
map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
@@ -58,6 +55,9 @@
if (map.Exists(Base::Debuggable)) {
options->debuggable_ = true;
}
+ if (map.Exists(Base::Baseline)) {
+ options->baseline_ = true;
+ }
map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
@@ -80,6 +80,7 @@
if (map.Exists(Base::CountHotnessInCompiledCode)) {
options->count_hotness_in_compiled_code_ = true;
}
+ map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_);
if (map.Exists(Base::DumpTimings)) {
options->dump_timings_ = true;
@@ -106,9 +107,6 @@
.template WithType<std::string>()
.IntoKey(Map::CompilerFilter)
- .Define("--compile-pic")
- .IntoKey(Map::PIC)
-
.Define("--huge-method-max=_")
.template WithType<unsigned int>()
.IntoKey(Map::HugeMethodMaxThreshold)
@@ -159,6 +157,9 @@
.Define("--debuggable")
.IntoKey(Map::Debuggable)
+ .Define("--baseline")
+ .IntoKey(Map::Baseline)
+
.Define("--top-k-profile-threshold=_")
.template WithType<double>().WithRange(0.0, 100.0)
.IntoKey(Map::TopKProfileThreshold)
@@ -184,6 +185,11 @@
.template WithType<std::string>()
.IntoKey(Map::RegisterAllocationStrategy)
+ .Define("--resolve-startup-const-strings=_")
+ .template WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(Map::ResolveStartupConstStrings)
+
.Define("--verbose-methods=_")
.template WithType<ParseStringList<','>>()
.IntoKey(Map::VerboseMethods);
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 529d43f..a593240 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -48,16 +48,18 @@
COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo)
COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
COMPILER_OPTIONS_KEY (Unit, Debuggable)
+COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
+COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
// TODO: Add type parser.
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
-COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
+COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
COMPILER_OPTIONS_KEY (Unit, CountHotnessInCompiledCode)
COMPILER_OPTIONS_KEY (Unit, DumpTimings)
COMPILER_OPTIONS_KEY (Unit, DumpPassTimings)
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index fd17364..80c0a68 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -50,7 +50,7 @@
// which always points to the first source statement.
static constexpr const uint32_t kDexPc = 0;
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3fc559e..bc8641a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -188,7 +188,7 @@
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(
- self, code_cache, method, osr, jit_logger_.get());
+ self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
}
// Trim maps to reduce memory usage.
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 62e8e02..09376dd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -219,12 +219,6 @@
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
- // Offsets into data structures
- // TODO: if cross compiling these offsets are for the host not the target
- const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
- const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
- const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
// 1. Build the frame saving all callee saves, Method*, and PC return address.
const size_t frame_size(main_jni_conv->FrameSize()); // Excludes outgoing args.
ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 165fc60..8b395a0 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -87,7 +87,7 @@
private:
// Padding to ensure longs and doubles are not split in o32.
size_t padding_;
- size_t use_fp_arg_registers_;
+ bool use_fp_arg_registers_;
DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
};
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 7c29df8..e15161e 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -598,9 +598,10 @@
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
HInstruction* new_array = new (allocator) HNewArray(
- constant_10,
- constant_10,
- 0);
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (allocator) HGoto());
@@ -977,7 +978,11 @@
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (GetAllocator()) HGoto());
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a90ff3f..d8e442c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -197,7 +197,7 @@
return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
}
- void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -230,29 +230,31 @@
};
void CodeGenerator::CodeGenerationData::EmitJitRoots(
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
+ DCHECK(roots->empty());
+ roots->reserve(GetNumberOfJitRoots());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
// Update the `roots` with the string, and replace the temporarily stored address
// with its index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsString());
entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(
- reinterpret_cast<mirror::String*>(roots->Get(index)));
+ class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
++index;
}
for (auto& entry : jit_class_roots_) {
// Update the `roots` with the class, and replace the temporarily stored address
// with its index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsClass());
entry.second = index;
++index;
}
@@ -1489,7 +1491,12 @@
<< " instruction->GetSideEffects().ToString()="
<< instruction->GetSideEffects().ToString();
} else {
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+ // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend
+ // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However
+ // if execution never returns to the compiled code from a GC point this restriction is
+ // unnecessary - in particular for fatal slow paths which might trigger GC.
+ DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
+ instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
// When (non-Baker) read barriers are enabled, some instructions
// use a slow path to emit a read barrier, which does not trigger
// GC.
@@ -1640,25 +1647,18 @@
}
void CodeGenerator::EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data) {
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
code_generation_data_->EmitJitRoots(roots);
EmitJitRootPatches(code, roots_data);
}
-QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
- ScopedObjectAccess soa(Thread::Current());
- if (array_klass == nullptr) {
- // This can only happen for non-primitive arrays, as primitive arrays can always
- // be resolved.
- return kQuickAllocArrayResolved32;
- }
-
- switch (array_klass->GetComponentSize()) {
- case 1: return kQuickAllocArrayResolved8;
- case 2: return kQuickAllocArrayResolved16;
- case 4: return kQuickAllocArrayResolved32;
- case 8: return kQuickAllocArrayResolved64;
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+ switch (new_array->GetComponentSizeShift()) {
+ case 0: return kQuickAllocArrayResolved8;
+ case 1: return kQuickAllocArrayResolved16;
+ case 2: return kQuickAllocArrayResolved32;
+ case 3: return kQuickAllocArrayResolved64;
}
LOG(FATAL) << "Unreachable";
return kQuickAllocArrayResolved;
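
The new entrypoint selection keys off the component size shift carried by HNewArray instead of a resolved class: component sizes of 1, 2, 4, and 8 bytes correspond to shifts 0 through 3. A minimal standalone version of that mapping, with a hypothetical enum in place of QuickEntrypointEnum:

  #include <cstddef>
  #include <stdexcept>

  enum class ArrayAllocEntrypoint { kResolved8, kResolved16, kResolved32, kResolved64 };

  // Same switch as GetArrayAllocationEntrypoint(), taking the shift directly.
  ArrayAllocEntrypoint EntrypointForComponentSizeShift(size_t shift) {
    switch (shift) {
      case 0: return ArrayAllocEntrypoint::kResolved8;   // 1-byte components.
      case 1: return ArrayAllocEntrypoint::kResolved16;  // 2-byte components.
      case 2: return ArrayAllocEntrypoint::kResolved32;  // 4-byte components.
      case 3: return ArrayAllocEntrypoint::kResolved64;  // 8-byte components.
      default: throw std::invalid_argument("unexpected component size shift");
    }
  }

For example, a long[] allocation (8-byte components, shift 3) selects the 64-bit resolved entrypoint.
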
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e77d621..3f56078 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -356,8 +356,8 @@
// Fills the `literals` array with literals collected during code generation.
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data)
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -622,7 +622,7 @@
// otherwise return a fall-back info that should be used instead.
virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) = 0;
+ ArtMethod* method) = 0;
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(
@@ -636,7 +636,7 @@
virtual void GenerateNop() = 0;
- static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
protected:
// Patch info used for recording locations of required linker patches and their targets,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d56f7aa..3f4fb15 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4053,7 +4053,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On ARM64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -4350,7 +4350,7 @@
// Add ADD with its PC-relative type patch.
vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
EmitAddPlaceholder(add_label, reg.X(), reg.X());
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
EmitAdrpPlaceholder(adrp_label, reg.X());
@@ -5004,10 +5004,8 @@
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e7a20b..1ba58b1 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -557,7 +557,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3580975..d5b734d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2677,6 +2677,18 @@
const Location first = locations->InAt(0);
const Location out = locations->Out();
const Location second = locations->InAt(1);
+
+  // In the unlucky case where the output of this instruction overlaps
+  // with an input of an "emitted-at-use-site" condition, and the output
+  // of this instruction is not one of its inputs, we need to fall back
+  // to branches instead of conditional ARM instructions.
+ bool output_overlaps_with_condition_inputs =
+ !IsBooleanValueOrMaterializedCondition(condition) &&
+ !out.Equals(first) &&
+ !out.Equals(second) &&
+ (condition->GetLocations()->InAt(0).Equals(out) ||
+ condition->GetLocations()->InAt(1).Equals(out));
+ DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
Location src;
if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@
return;
}
- if (!DataType::IsFloatingPointType(type)) {
+ if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
bool invert = false;
if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@
vixl32::Label* false_target = nullptr;
vixl32::Label* true_target = nullptr;
vixl32::Label select_end;
+ vixl32::Label other_case;
vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@
src = second;
if (!out.Equals(first)) {
- codegen_->MoveLocation(out, first, type);
+ if (output_overlaps_with_condition_inputs) {
+ false_target = &other_case;
+ } else {
+ codegen_->MoveLocation(out, first, type);
+ }
}
}
GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
codegen_->MoveLocation(out, src, type);
+ if (output_overlaps_with_condition_inputs) {
+ __ B(target);
+ __ Bind(&other_case);
+ codegen_->MoveLocation(out, first, type);
+ }
if (select_end.IsReferenced()) {
__ Bind(&select_end);
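
The overlap test introduced in this hunk can be read on its own as a small predicate: the early move into the output register is only safe when it cannot clobber an input that the not-yet-emitted condition still needs. A simplified sketch, with plain integers standing in for register locations and the condition assumed to be emitted at its use site:

  // Simplified version of output_overlaps_with_condition_inputs.
  bool OutputOverlapsConditionInputs(int out, int first, int second,
                                     int cond_in0, int cond_in1) {
    return out != first && out != second && (cond_in0 == out || cond_in1 == out);
  }

When this returns true, the generated code takes the branch-based path added above rather than a conditional move.
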
@@ -2876,31 +2898,16 @@
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
- // Handle the long/FP comparisons made in instruction simplification.
- switch (cond->InputAt(0)->GetType()) {
- case DataType::Type::kInt64:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- default:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ const DataType::Type type = cond->InputAt(0)->GetType();
+ if (DataType::IsFloatingPointType(type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ }
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -4964,7 +4971,7 @@
__ Rrx(o_l, low);
}
} else {
- DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+ DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
if (op->IsShl()) {
__ Lsl(o_h, high, shift_value);
__ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
@@ -5036,10 +5043,8 @@
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -8650,7 +8655,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8907,7 +8912,7 @@
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageIntrinsicPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageRelRoPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 33502d4..5edca87 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -547,7 +547,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d74a7a7..c6d0f3f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1766,7 +1766,7 @@
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
__ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
@@ -7964,7 +7964,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8702,10 +8702,8 @@
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index bf95893..5080731 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -563,7 +563,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7c89808..039b3ca 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1680,7 +1680,7 @@
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
__ Daddiu(reg, AT, /* placeholder */ 0x5678);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
@@ -6059,7 +6059,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On MIPS64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -6633,10 +6633,8 @@
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ddc154d..52f3a62 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -541,7 +541,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 6d135a9..e79a96b 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -1277,6 +1277,74 @@
}
}
+void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ DCHECK(instruction->GetPackedType() == DataType::Type::kInt32);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+
+ // For Int8 and Uint8 we need a temp register.
+ if (DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ VRegister acc = VRegisterFrom(locations->InAt(0));
+ VRegister left = VRegisterFrom(locations->InAt(1));
+ VRegister right = VRegisterFrom(locations->InAt(2));
+ HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
+ HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
+ DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+ HVecOperation::ToSignedType(b->GetPackedType()));
+ DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32);
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+
+ size_t inputs_data_size = DataType::Size(a->GetPackedType());
+ switch (inputs_data_size) {
+ case 1u: {
+ DCHECK_EQ(16u, a->GetVectorLength());
+ VRegister tmp = VRegisterFrom(locations->GetTemp(0));
+ if (instruction->IsZeroExtending()) {
+ // TODO: Use Armv8.4-A UDOT instruction when it is available.
+ __ Umull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Umull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ } else {
+ // TODO: Use Armv8.4-A SDOT instruction when it is available.
+ __ Smull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Smull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ }
+ break;
+ }
+ case 2u:
+ DCHECK_EQ(8u, a->GetVectorLength());
+ if (instruction->IsZeroExtending()) {
+ __ Umlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Umlal2(acc.V4S(), left.V8H(), right.V8H());
+ } else {
+ __ Smlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Smlal2(acc.V4S(), left.V8H(), right.V8H());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size;
+ }
+}
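+
Since the ARMv8.4-A UDOT/SDOT instructions are not used yet, the sequence above widens the 8-bit (or 16-bit) lanes, multiplies, and folds the products into four 32-bit accumulator lanes; the lanes are later combined by a sum reduction. A scalar reference for the value that survives that reduction is sketched below (the signed byte case; the unsigned case is analogous). This is a hedged illustration of the computed value, not of the per-lane layout the vector code produces.

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Scalar reference: the sum of all widened products. The vector code spreads
  // these products across four 32-bit accumulator lanes; summing the lanes
  // yields this value.
  int32_t DotProdS8(const std::vector<int8_t>& left, const std::vector<int8_t>& right) {
    int32_t sum = 0;
    for (size_t i = 0; i < left.size() && i < right.size(); ++i) {
      sum += static_cast<int32_t>(left[i]) * static_cast<int32_t>(right[i]);
    }
    return sum;
  }
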
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1354,6 +1422,7 @@
Register scratch;
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1385,7 +1454,6 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kFloat32:
case DataType::Type::kInt64:
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 7b66b17..62b6c4e 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -854,6 +854,14 @@
}
}
+void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word
// size equals to 4).
static bool IsWordAligned(HVecMemoryOperation* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index df0e148..24f4fb2 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -1274,6 +1274,14 @@
}
}
+void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index de354b6..972c49e 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -1272,6 +1272,14 @@
}
}
+void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 086ae07..c52ecc7 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -1143,6 +1143,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1205,6 +1213,7 @@
XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1232,7 +1241,6 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64:
DCHECK_LE(2u, instruction->GetVectorLength());
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4d31ab6..87d0106 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -1116,6 +1116,14 @@
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1178,6 +1186,7 @@
XmmRegister reg = locations->Out().AsFpuRegister<XmmRegister>();
bool is_aligned16 = instruction->GetAlignment().IsAlignedAt(16);
switch (instruction->GetPackedType()) {
+ case DataType::Type::kInt16: // (short) s.charAt(.) can yield HVecLoad/Int16/StringCharAt.
case DataType::Type::kUint16:
DCHECK_EQ(8u, instruction->GetVectorLength());
// Special handling of compressed/uncompressed string load.
@@ -1205,7 +1214,6 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- case DataType::Type::kInt16:
case DataType::Type::kInt32:
case DataType::Type::kInt64:
DCHECK_LE(2u, instruction->GetVectorLength());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6a27081..9f34a51 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4525,10 +4525,8 @@
}
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4785,7 +4783,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -4988,7 +4986,7 @@
invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
__ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
@@ -8301,7 +8299,7 @@
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
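
The `using` form adopted above is the modern spelling of the same GCC/Clang attribute trick: an alias of uint32_t whose alignment requirement is one byte, so a store through it may legally target an unaligned code address. A small standalone illustration, assuming a GCC/Clang-compatible compiler:

  #include <cstddef>
  #include <cstdint>

  // Alias with 1-byte alignment so the compiler emits an unaligned-safe store.
  using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;

  // Patch a 32-bit literal at an arbitrary (possibly unaligned) byte offset.
  void PatchLiteral(uint8_t* code, size_t offset, uint32_t value) {
    reinterpret_cast<unaligned_uint32_t*>(code + offset)[0] = value;
  }
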
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6154771..93b0461 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -410,7 +410,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 489652b..dac2dba 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -978,7 +978,7 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -1125,7 +1125,7 @@
if (GetCompilerOptions().IsBootImage()) {
__ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
RecordBootImageIntrinsicPatch(boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
__ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
@@ -4371,10 +4371,8 @@
}
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -7542,7 +7540,7 @@
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f77a5c8..1e71397 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -409,7 +409,7 @@
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 3cb8bf2..3a1a9e0 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -78,7 +78,7 @@
VisitSetLocation(instruction, value);
}
- void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) {
+ void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) override {
// Pessimize: Merge all fences.
MergeCandidateFences();
}
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 5ac6e46..3cbcc9e 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -231,6 +231,21 @@
}
}
+ static Type ToUnsigned(Type type) {
+ switch (type) {
+ case Type::kInt8:
+ return Type::kUint8;
+ case Type::kInt16:
+ return Type::kUint16;
+ case Type::kInt32:
+ return Type::kUint32;
+ case Type::kInt64:
+ return Type::kUint64;
+ default:
+ return type;
+ }
+ }
+
static const char* PrettyDescriptor(Type type);
private:
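
ToUnsigned() mirrors the existing ToSigned() helper used by the graph visualizer change further below: signed integral types map to their unsigned counterparts and everything else passes through unchanged. A trivial standalone sketch of that behaviour, with a cut-down enum standing in for DataType::Type:

  #include <cassert>

  // Simplified stand-in for DataType::Type covering the cases ToUnsigned() maps.
  enum class Ty { kInt8, kUint8, kInt16, kUint16, kInt32, kUint32, kInt64, kUint64, kFloat32 };

  Ty ToUnsigned(Ty type) {
    switch (type) {
      case Ty::kInt8:  return Ty::kUint8;
      case Ty::kInt16: return Ty::kUint16;
      case Ty::kInt32: return Ty::kUint32;
      case Ty::kInt64: return Ty::kUint64;
      default:         return type;  // Unsigned and FP types pass through unchanged.
    }
  }

  int main() {
    assert(ToUnsigned(Ty::kInt16) == Ty::kUint16);
    assert(ToUnsigned(Ty::kFloat32) == Ty::kFloat32);  // No unsigned counterpart.
    return 0;
  }
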
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 31db8c2..a1af2be 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -106,8 +106,7 @@
}
}
-typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
- DisassemblerOptions* options);
+using create_disasm_prototype = Disassembler*(InstructionSet, DisassemblerOptions*);
class HGraphVisualizerDisassembler {
public:
HGraphVisualizerDisassembler(InstructionSet instruction_set,
@@ -564,6 +563,14 @@
StartAttributeStream("kind") << instruction->GetOpKind();
}
+ void VisitVecDotProd(HVecDotProd* instruction) override {
+ VisitVecOperation(instruction);
+ DataType::Type arg_type = instruction->InputAt(1)->AsVecOperation()->GetPackedType();
+ StartAttributeStream("type") << (instruction->IsZeroExtending() ?
+ DataType::ToUnsigned(arg_type) :
+ DataType::ToSigned(arg_type));
+ }
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index e5bc6ef..223e08e 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -701,7 +701,11 @@
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ x_,
+ /* length= */ x_,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
entry_block_->AddInstruction(new_array);
HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3ba7414..7f94a29 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1296,9 +1296,7 @@
// If invoke_instruction is devirtualized to a different method, give intrinsics
// another chance before we try to inline it.
- bool wrong_invoke_type = false;
- if (invoke_instruction->GetResolvedMethod() != method &&
- IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+ if (invoke_instruction->GetResolvedMethod() != method && method->IsIntrinsic()) {
MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
if (invoke_instruction->IsInvokeInterface()) {
// We don't intrinsify an invoke-interface directly.
@@ -1311,6 +1309,7 @@
invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
method,
method->GetMethodIndex());
+ DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
HInputsRef inputs = invoke_instruction->GetInputs();
for (size_t index = 0; index != inputs.size(); ++index) {
new_invoke->SetArgumentAt(index, inputs[index]);
@@ -1320,14 +1319,11 @@
if (invoke_instruction->GetType() == DataType::Type::kReference) {
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
- // Run intrinsic recognizer again to set new_invoke's intrinsic.
- IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
- DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
return_replacement = new_invoke;
// invoke_instruction is replaced with new_invoke.
should_remove_invoke_instruction = true;
} else {
- // invoke_instruction is intrinsified and stays.
+ invoke_instruction->SetResolvedMethod(method);
}
} else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
@@ -2022,13 +2018,9 @@
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
- HSharpening sharpening(callee_graph, codegen_);
InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
- &intrinsics,
- &sharpening,
&simplify,
&fold,
&dce,
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e555d0d..bd94789 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -466,22 +466,17 @@
}
ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
- // The callback gets called when the line number changes.
- // In other words, it marks the start of new java statement.
- struct Callback {
- static bool Position(void* ctx, const DexFile::PositionInfo& entry) {
- static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_);
- return false;
- }
- };
ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
code_item_accessor_.InsnsSizeInCodeUnits(),
/* expandable */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- dex_file_->DecodeDebugPositionInfo(code_item_accessor_.DebugInfoOffset(),
- Callback::Position,
- locations);
+ // The visitor gets called when the line number changes.
+  // In other words, it marks the start of a new Java statement.
+ code_item_accessor_.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ locations->SetBit(entry.address_);
+ return false;
+ });
// Instruction-specific tweaks.
for (const DexInstructionPcPair& inst : code_item_accessor_) {
switch (inst->Opcode()) {
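
The refactoring above swaps a C-style static callback plus a void* context pointer for a lambda that captures the bit vector directly. The sketch below shows the same shape of change; `DecodePositions` and `PositionInfo` here are hypothetical stand-ins for `DecodeDebugPositionInfo` and the real DexFile types.

  #include <cstdint>
  #include <functional>
  #include <set>
  #include <vector>

  struct PositionInfo { uint32_t address_; };

  // Hypothetical decoder: calls the visitor for each entry until it returns true.
  void DecodePositions(const std::vector<PositionInfo>& table,
                       const std::function<bool(const PositionInfo&)>& visitor) {
    for (const PositionInfo& entry : table) {
      if (visitor(entry)) {
        return;
      }
    }
  }

  // Lambda capture replaces the old static Callback::Position(void* ctx, ...) shape.
  std::set<uint32_t> CollectStatementStarts(const std::vector<PositionInfo>& table) {
    std::set<uint32_t> locations;
    DecodePositions(table, [&](const PositionInfo& entry) {
      locations.insert(entry.address_);
      return false;  // Keep iterating, mirroring the `return false` in the diff.
    });
    return locations;
  }
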
@@ -983,11 +978,8 @@
}
}
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
- HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u
- };
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+ HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
@@ -1505,21 +1497,22 @@
// to be visited once it is clear whether it has remaining uses.
if (arg_this->IsNewInstance()) {
ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
- // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
- for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
- if ((*current_locals_)[vreg] == arg_this) {
- (*current_locals_)[vreg] = invoke;
- }
- }
} else {
DCHECK(arg_this->IsPhi());
// We can get a phi as input of a String.<init> if there is a loop between the
// allocation and the String.<init> call. As we don't know which other phis might alias
- // with `arg_this`, we keep a record of these phis and will analyze their inputs and
- // uses once the inputs and users are populated (in ssa_builder.cc).
- // Note: we only do this for phis, as it is a somewhat more expensive operation than
- // what we're doing above when the input is the `HNewInstance`.
- ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
+ // with `arg_this`, we keep a record of those invocations so we can later replace
+ // the allocation with the invocation.
+      // Add the actual 'this' input so the analysis knows which instruction is the allocation.
+ // The input will be removed during the analysis.
+ invoke->AddInput(arg_this);
+ ssa_builder_->AddUninitializedStringPhi(invoke);
+ }
+ // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+ if ((*current_locals_)[vreg] == arg_this) {
+ (*current_locals_)[vreg] = invoke;
+ }
}
return true;
}
@@ -1849,15 +1842,27 @@
graph_->SetHasBoundsChecks(true);
}
+HNewArray* HInstructionBuilder::BuildNewArray(uint32_t dex_pc,
+ dex::TypeIndex type_index,
+ HInstruction* length) {
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(type_index));
+ DCHECK_EQ(descriptor[0], '[');
+ size_t component_type_shift = Primitive::ComponentSizeShift(Primitive::GetType(descriptor[1]));
+
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc, component_type_shift);
+ AppendInstruction(new_array);
+ return new_array;
+}
+
HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
const InstructionOperands& operands) {
const size_t number_of_operands = operands.GetNumberOfOperands();
HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(object);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
DCHECK_EQ(descriptor[0], '[') << descriptor;
char primitive = descriptor[1];
@@ -1870,13 +1875,13 @@
for (size_t i = 0; i < number_of_operands; ++i) {
HInstruction* value = LoadLocal(operands.GetOperand(i), type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(new_array, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
- latest_result_ = object;
+ latest_result_ = new_array;
- return object;
+ return new_array;
}
template <typename T>
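
BuildNewArray() above derives the component size shift directly from the array type descriptor: the leading '[' is asserted and the second character names the component type. A hedged standalone sketch of that mapping for primitive components follows; treating reference components ('L' or '[') as 4-byte entries is an assumption of this sketch, not a statement about ART internals.

  #include <cstddef>

  // Maps a primitive type descriptor character to log2(component size in bytes).
  size_t ComponentSizeShiftForDescriptor(char c) {
    switch (c) {
      case 'Z': case 'B': return 0;  // boolean, byte: 1 byte
      case 'C': case 'S': return 1;  // char, short:   2 bytes
      case 'I': case 'F': return 2;  // int, float:    4 bytes
      case 'J': case 'D': return 3;  // long, double:  8 bytes
      default:            return 2;  // references (assumed 4 bytes here)
    }
  }

For instance, a "[J" (long[]) descriptor yields a shift of 3, which feeds the 64-bit allocation entrypoint selection shown earlier in this change.
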
@@ -2899,10 +2904,8 @@
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
- HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
break;
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index af1b86c..2ab2139 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -179,6 +179,9 @@
uint32_t call_site_idx,
const InstructionOperands& operands);
+ // Builds a new array node.
+ HNewArray* BuildNewArray(uint32_t dex_pc, dex::TypeIndex type_index, HInstruction* length);
+
// Builds a new array node and the instructions that fill it.
HNewArray* BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2757f7b..2b6ae20 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -1181,8 +1181,7 @@
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
DataType::Type result_type = instruction->GetResultType();
- if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
- // Remove the implicit conversion; this includes conversion to the same type.
+ if (instruction->IsImplicitConversion()) {
instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -1317,7 +1316,7 @@
}
HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg();
- if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) {
+ if (left_is_neg != right_is_neg && neg->HasOnlyOneNonEnvironmentUse()) {
// Replace code looking like
// NEG tmp, b
// ADD dst, a, tmp
@@ -2146,22 +2145,6 @@
ReferenceTypeInfo argument_rti = argument->GetReferenceTypeInfo();
if (argument_rti.IsValid() && argument_rti.IsStringClass()) {
optimizations.SetArgumentIsString();
- } else if (kUseReadBarrier) {
- DCHECK(instruction->GetResolvedMethod() != nullptr);
- DCHECK(instruction->GetResolvedMethod()->GetDeclaringClass()->IsStringClass() ||
- // Object.equals() can be devirtualized to String.equals().
- instruction->GetResolvedMethod()->GetDeclaringClass()->IsObjectClass());
- Runtime* runtime = Runtime::Current();
- // For AOT, we always assume that the boot image shall contain the String.class and
- // we do not need a read barrier for boot image classes as they are non-moveable.
- // For JIT, check if we actually have a boot image; if we do, the String.class
- // should also be non-moveable.
- if (runtime->IsAotCompiler() || runtime->GetHeap()->HasBootImageSpace()) {
- DCHECK(runtime->IsAotCompiler() ||
- !runtime->GetHeap()->IsMovableObject(
- instruction->GetResolvedMethod()->GetDeclaringClass()));
- optimizations.SetNoReadBarrierForStringClass();
- }
}
}
}
@@ -2306,7 +2289,7 @@
// the invoke, as we would need to look it up in the current dex file, and it
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+ invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(method, codegen_));
}
}
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 21efe11..619cd8e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -32,179 +32,6 @@
namespace art {
-// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
-#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- static_assert( \
- static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
- "Instrinsics enumeration space overflow.");
-#include "intrinsics_list.h"
- INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
-#undef INTRINSICS_LIST
-#undef CHECK_INTRINSICS_ENUM_VALUES
-
-// Function that returns whether an intrinsic is static/direct or virtual.
-static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kInterface; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return IsStatic;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kInterface;
-}
-
-// Function that returns whether an intrinsic needs an environment or not.
-static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCache(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return NeedsEnvironmentOrCache;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kNeedsEnvironmentOrCache;
-}
-
-// Function that returns whether an intrinsic has side effects.
-static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kAllSideEffects;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return SideEffects;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kAllSideEffects;
-}
-
-// Function that returns whether an intrinsic can throw exceptions.
-static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kCanThrow;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return Exceptions;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kCanThrow;
-}
-
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
- //
- // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
- // failure occured. We might be in a situation where we have inlined a method that calls an
- // intrinsic, but that method is in a different dex file on which we do not have a
- // verified_method that would have helped the compiler driver sharpen the call. In that case,
- // make sure that the intrinsic is actually for some final method (or in a final class), as
- // otherwise the intrinsics setup is broken.
- //
- // For the last direction, we have intrinsics for virtual functions that will perform a check
- // inline. If the precise type is known, however, the instruction will be sharpened to an
- // InvokeStaticOrDirect.
- InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
- InvokeType invoke_type = invoke->GetInvokeType();
-
- switch (intrinsic_type) {
- case kStatic:
- return (invoke_type == kStatic);
-
- case kDirect:
- if (invoke_type == kDirect) {
- return true;
- }
- if (invoke_type == kVirtual) {
- ArtMethod* art_method = invoke->GetResolvedMethod();
- return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
- }
- return false;
-
- case kVirtual:
- // Call might be devirtualized.
- return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
-
- case kSuper:
- case kInterface:
- case kPolymorphic:
- case kCustom:
- return false;
- }
- LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
- UNREACHABLE();
-}
-
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
- ArtMethod* art_method,
- /*out*/ bool* wrong_invoke_type) {
- if (art_method == nullptr) {
- art_method = invoke->GetResolvedMethod();
- }
- *wrong_invoke_type = false;
- if (art_method == nullptr || !art_method->IsIntrinsic()) {
- return false;
- }
-
- // TODO: b/65872996 The intent is that polymorphic signature methods should
- // be compiler intrinsics. At present, they are only interpreter intrinsics.
- if (art_method->IsPolymorphicSignature()) {
- return false;
- }
-
- Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
- if (CheckInvokeType(intrinsic, invoke) == false) {
- *wrong_invoke_type = true;
- return false;
- }
-
- invoke->SetIntrinsic(intrinsic,
- NeedsEnvironmentOrCache(intrinsic),
- GetSideEffects(intrinsic),
- GetExceptions(intrinsic));
- return true;
-}
-
-bool IntrinsicsRecognizer::Run() {
- bool didRecognize = false;
- ScopedObjectAccess soa(Thread::Current());
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
- inst_it.Advance()) {
- HInstruction* inst = inst_it.Current();
- if (inst->IsInvoke()) {
- bool wrong_invoke_type = false;
- if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
- didRecognize = true;
- MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
- } else if (wrong_invoke_type) {
- LOG(WARNING)
- << "Found an intrinsic with unexpected invoke type: "
- << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
- << inst->DebugName();
- }
- }
- }
- }
- return didRecognize;
-}
-
std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
switch (intrinsic) {
case Intrinsics::kNone:
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 06e2fbb..8245453 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -34,28 +34,6 @@
static constexpr uint32_t kNanFloat = 0x7fc00000U;
static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
-// Recognize intrinsics from HInvoke nodes.
-class IntrinsicsRecognizer : public HOptimization {
- public:
- IntrinsicsRecognizer(HGraph* graph,
- OptimizingCompilerStats* stats,
- const char* name = kIntrinsicsRecognizerPassName)
- : HOptimization(graph, name, stats) {}
-
- bool Run() override;
-
- // Static helper that recognizes intrinsic call. Returns true on success.
- // If it fails due to invoke type mismatch, wrong_invoke_type is set.
- // Useful to recognize intrinsics on individual calls outside this full pass.
- static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
-
- private:
- DISALLOW_COPY_AND_ASSIGN(IntrinsicsRecognizer);
-};
-
class IntrinsicVisitor : public ValueObject {
public:
virtual ~IntrinsicVisitor() {}
@@ -219,7 +197,6 @@
INTRINSIC_OPTIMIZATION(ArgumentNotNull, 0);
INTRINSIC_OPTIMIZATION(ArgumentIsString, 1);
- INTRINSIC_OPTIMIZATION(NoReadBarrierForStringClass, 2);
private:
DISALLOW_COPY_AND_ASSIGN(StringEqualsOptimizations);
@@ -263,11 +240,14 @@
// Defines a list of unreached intrinsics: that is, method calls that are recognized as
// an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
- << " should have been converted to HIR"; \
+ if (!codegen_->GetCompilerOptions().IsBaseline()) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+ } \
} \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1abfcb0..7684dc7 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1398,13 +1398,6 @@
}
void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 1127fb8..38e4c89 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -1092,7 +1092,8 @@
assembler->MaybeUnpoisonHeapReference(tmp);
}
__ Subs(tmp, tmp, expected);
- __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint);
+ static_cast<vixl32::MacroAssembler*>(assembler->GetVIXLAssembler())->
+ B(ne, failure, /* hint= */ (failure == loop_exit) ? kNear : kBranchWithoutHint);
if (type == DataType::Type::kReference) {
assembler->MaybePoisonHeapReference(value);
}
@@ -1458,13 +1459,6 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 771714b..6f7f5e4 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1516,13 +1516,6 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 4a1bd5b..2eb2529 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1369,13 +1369,6 @@
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d33c0c3..3504d7a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -922,13 +922,6 @@
}
void IntrinsicLocationsBuilderX86::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index ae88974..96f6eaa 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1230,13 +1230,6 @@
}
void IntrinsicLocationsBuilderX86_64::VisitStringEquals(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier &&
- !StringEqualsOptimizations(invoke).GetArgumentIsString() &&
- !StringEqualsOptimizations(invoke).GetNoReadBarrierForStringClass()) {
- // No support for this odd case (String class is moveable, not in the boot image).
- return;
- }
-
LocationSummary* locations =
new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
locations->SetInAt(0, Location::RequiresRegister());
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 7f71745..b33d0f4 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -692,7 +692,7 @@
VisitSetLocation(instruction, idx, instruction->InputAt(2));
}
- void VisitDeoptimize(HDeoptimize* instruction) {
+ void VisitDeoptimize(HDeoptimize* instruction) override {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7d66155..12b180d 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -351,7 +351,10 @@
// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
- if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
+ if (reduction->IsVecAdd() ||
+ reduction->IsVecSub() ||
+ reduction->IsVecSADAccumulate() ||
+ reduction->IsVecDotProd()) {
return HVecReduce::kSum;
}
LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
@@ -431,6 +434,23 @@
}
}
+// Returns the narrower of the types of instructions a and b, looking through type conversions.
+static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
+ DataType::Type type = a->GetType();
+ if (DataType::Size(b->GetType()) < DataType::Size(type)) {
+ type = b->GetType();
+ }
+ if (a->IsTypeConversion() &&
+ DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = a->InputAt(0)->GetType();
+ }
+ if (b->IsTypeConversion() &&
+ DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = b->InputAt(0)->GetType();
+ }
+ return type;
+}
+
//
// Public methods.
//
@@ -1289,6 +1309,7 @@
DataType::Type type = instruction->GetType();
// Recognize SAD idiom or direct reduction.
if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
+ VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
(TrySetVectorType(type, &restrictions) &&
VectorizeUse(node, instruction, generate_code, type, restrictions))) {
if (generate_code) {
@@ -1531,11 +1552,11 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv | kNoReduction;
+ *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
return TrySetVectorLength(4);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoWideSAD;
@@ -1580,12 +1601,23 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |=
- kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoMul |
+ kNoDiv |
+ kNoShift |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+ kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoDiv |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+ kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoSAD;
@@ -1610,11 +1642,11 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -1639,11 +1671,11 @@
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -2071,18 +2103,7 @@
HInstruction* r = a;
HInstruction* s = b;
bool is_unsigned = false;
- DataType::Type sub_type = a->GetType();
- if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->GetType();
- }
- if (a->IsTypeConversion() &&
- DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = a->InputAt(0)->GetType();
- }
- if (b->IsTypeConversion() &&
- DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->InputAt(0)->GetType();
- }
+ DataType::Type sub_type = GetNarrowerType(a, b);
if (reduction_type != sub_type &&
(!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
return false;
@@ -2123,6 +2144,75 @@
return false;
}
+// Method recognises the following dot product idiom:
+// q += a * b for operands a, b whose type is narrower than the reduction one,
+// provided that the operands have the same type or are promoted to a wider form.
+// Since this may involve a vector length change, the idiom is handled by going directly
+// to a dot product node (rather than relying on combining finer grained nodes later).
+bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type reduction_type,
+ uint64_t restrictions) {
+ if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+ return false;
+ }
+
+ HInstruction* q = instruction->InputAt(0);
+ HInstruction* v = instruction->InputAt(1);
+ if (!v->IsMul() || v->GetType() != reduction_type) {
+ return false;
+ }
+
+ HInstruction* a = v->InputAt(0);
+ HInstruction* b = v->InputAt(1);
+ HInstruction* r = a;
+ HInstruction* s = b;
+ DataType::Type op_type = GetNarrowerType(a, b);
+ bool is_unsigned = false;
+
+ if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
+ return false;
+ }
+ op_type = HVecOperation::ToProperType(op_type, is_unsigned);
+
+ if (!TrySetVectorType(op_type, &restrictions) ||
+ HasVectorRestrictions(restrictions, kNoDotProd)) {
+ return false;
+ }
+
+ DCHECK(r != nullptr && s != nullptr);
+ // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
+ // idiomatic operation. Sequential code uses the original scalar expressions.
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = a;
+ s = b;
+ }
+ if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, r, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, s, generate_code, op_type, restrictions)) {
+ if (generate_code) {
+ if (vector_mode_ == kVector) {
+ vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
+ global_allocator_,
+ vector_map_->Get(q),
+ vector_map_->Get(r),
+ vector_map_->Get(s),
+ reduction_type,
+ is_unsigned,
+ GetOtherVL(reduction_type, op_type, vector_length_),
+ kNoDexPc));
+ MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
+ } else {
+ GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
+ GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
//
// Vectorization heuristics.
//
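
For context on the new VectorizeDotProdIdiom pass above, the shape of loop it is meant to recognize is an int32 accumulator updated by a product of narrower operands. The sketch below is illustrative only (plain C++, not one of ART's run-tests); vectorization of it only kicks in on backends that do not set the new kNoDotProd restriction.

    #include <cstddef>
    #include <cstdint>

    // Dot product idiom: q += a * b, where a and b are int8 and therefore
    // narrower than the int32 reduction. The loop optimizer can now map each
    // vector step of this loop to a single HVecDotProd node instead of a chain
    // of widening multiplies and additions.
    int32_t DotProd(const int8_t* x, const int8_t* y, size_t n) {
      int32_t q = 0;
      for (size_t i = 0; i < n; ++i) {
        q += x[i] * y[i];  // operands are promoted to int32 before the multiply
      }
      return q;
    }
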
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 2b202fd..1a842c4 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -82,6 +82,7 @@
kNoReduction = 1 << 9, // no reduction
kNoSAD = 1 << 10, // no sum of absolute differences (SAD)
kNoWideSAD = 1 << 11, // no sum of absolute differences (SAD) with operand widening
+ kNoDotProd = 1 << 12, // no dot product
};
/*
@@ -217,6 +218,11 @@
bool generate_code,
DataType::Type type,
uint64_t restrictions);
+ bool VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type type,
+ uint64_t restrictions);
// Vectorization heuristics.
Alignment ComputeAlignment(HInstruction* offset,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 79a7e2c..aad06b9 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -3180,4 +3180,77 @@
}
}
+// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, InvokeType, _, SideEffects, Exceptions, ...) \
+ static_assert( \
+ static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+ "Instrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
+
+// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCacheIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return NeedsEnvOrCache;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kNeedsEnvironmentOrCache;
+}
+
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffectsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return SideEffects;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptionsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return Exceptions;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kCanThrow;
+}
+
+void HInvoke::SetResolvedMethod(ArtMethod* method) {
+ // TODO: b/65872996 The intent is that polymorphic signature methods should
+ // be compiler intrinsics. At present, they are only interpreter intrinsics.
+ if (method != nullptr &&
+ method->IsIntrinsic() &&
+ !method->IsPolymorphicSignature()) {
+ Intrinsics intrinsic = static_cast<Intrinsics>(method->GetIntrinsic());
+ SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCacheIntrinsic(intrinsic),
+ GetSideEffectsIntrinsic(intrinsic),
+ GetExceptionsIntrinsic(intrinsic));
+ }
+ resolved_method_ = method;
+}
+
} // namespace art
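
The three lookup helpers added above all rely on the INTRINSICS_LIST X-macro to expand the per-intrinsic property table into switch cases. A minimal standalone sketch of the pattern follows; the list, names, and properties are hypothetical stand-ins for ART's intrinsics_list.h, not its real contents.

    // Hypothetical X-macro table: V(Name, CanThrow).
    #define MY_INTRINSICS_LIST(V) \
      V(MathFloor, false)         \
      V(StringCharAt, true)

    enum class MyIntrinsic {
      kNone,
    #define DEFINE_ENUM_CASE(Name, CanThrow) k##Name,
      MY_INTRINSICS_LIST(DEFINE_ENUM_CASE)
    #undef DEFINE_ENUM_CASE
    };

    // Mirrors GetExceptionsIntrinsic(): the same table expands into one case per entry.
    static bool CanThrow(MyIntrinsic i) {
      switch (i) {
        case MyIntrinsic::kNone:
          return true;  // conservative default
    #define CAN_THROW_CASE(Name, CanThrow) \
        case MyIntrinsic::k##Name:         \
          return CanThrow;
        MY_INTRINSICS_LIST(CAN_THROW_CASE)
    #undef CAN_THROW_CASE
      }
      return true;
    }
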
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 5feffa0..6ebe89e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -29,6 +29,7 @@
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
+#include "art_method.h"
#include "data_type.h"
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
@@ -128,6 +129,7 @@
kAnalysisInvalidBytecode,
kAnalysisFailThrowCatchLoop,
kAnalysisFailAmbiguousArrayOp,
+ kAnalysisFailIrreducibleLoopAndStringInit,
kAnalysisSuccess,
};
@@ -1453,6 +1455,7 @@
M(VecSetScalars, VecOperation) \
M(VecMultiplyAccumulate, VecOperation) \
M(VecSADAccumulate, VecOperation) \
+ M(VecDotProd, VecOperation) \
M(VecLoad, VecMemoryOperation) \
M(VecStore, VecMemoryOperation) \
@@ -1626,6 +1629,21 @@
* the same, and any reference read depends on any reference read without
* further regard of its type).
*
+ * kDependsOnGCBit is defined in the following way: instructions with kDependsOnGCBit must not be
+ * alive across the point where garbage collection might happen.
+ *
+ * Note: Instructions with kCanTriggerGCBit do not depend on each other.
+ *
+ * kCanTriggerGCBit must be used for instructions for which GC might happen on the path across
+ * those instructions from the compiler perspective (between this instruction and the next one
+ * in the IR).
+ *
+ * Note: Instructions which can cause GC only on a fatal slow path do not need
+ * kCanTriggerGCBit as the execution never returns to the instruction next to the exceptional
+ * one. However the execution may return to compiled code if there is a catch block in the
+ * current method; for this purpose the TryBoundary exit instruction has kCanTriggerGCBit
+ * set.
+ *
* The internal representation uses 38-bit and is described in the table below.
* The first line indicates the side effect, and for field/array accesses the
* second line indicates the type of the access (in the order of the
@@ -1698,10 +1716,17 @@
return SideEffects(TypeFlag(type, kArrayReadOffset));
}
+ // Returns whether GC might happen across this instruction from the compiler's perspective,
+ // i.e. whether the next instruction in the IR could observe the effects of such a GC.
+ //
+ // See the SideEffect class comments.
static SideEffects CanTriggerGC() {
return SideEffects(1ULL << kCanTriggerGCBit);
}
+ // Returns whether the instruction must not be alive across a GC point.
+ //
+ // See the SideEffect class comments.
static SideEffects DependsOnGC() {
return SideEffects(1ULL << kDependsOnGCBit);
}
@@ -3136,8 +3161,15 @@
kLast = kExit
};
+ // SideEffects::CanTriggerGC prevents instructions with SideEffects::DependsOnGC from being
+ // alive across the catch block entering edges, as GC might happen while an exception is thrown.
+ // TryBoundary with BoundaryKind::kExit is conservatively used for that, as there is no
+ // HInstruction that a catch block must start with.
explicit HTryBoundary(BoundaryKind kind, uint32_t dex_pc = kNoDexPc)
- : HExpression(kTryBoundary, SideEffects::None(), dex_pc) {
+ : HExpression(kTryBoundary,
+ (kind == BoundaryKind::kExit) ? SideEffects::CanTriggerGC()
+ : SideEffects::None(),
+ dex_pc) {
SetPackedField<BoundaryKindField>(kind);
}
@@ -4293,7 +4325,7 @@
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
- void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
+ void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -4325,12 +4357,14 @@
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
- resolved_method_(resolved_method),
dex_method_index_(dex_method_index),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
+ // Check mutator lock, constructors lack annotalysis support.
+ Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
+ SetResolvedMethod(resolved_method);
}
DEFAULT_COPY_CONSTRUCTOR(Invoke);
@@ -4504,8 +4538,7 @@
allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit, and potentially
- // one other if the method is a string factory.
+ // potentially one other if the clinit check is explicit.
(NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
(clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
return_type,
@@ -4816,10 +4849,11 @@
class HNewArray final : public HExpression<2> {
public:
- HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+ HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cls);
SetRawInputAt(1, length);
+ SetPackedField<ComponentSizeShiftField>(component_size_shift);
}
bool IsClonable() const override { return true; }
@@ -4841,10 +4875,23 @@
return InputAt(1);
}
+ size_t GetComponentSizeShift() {
+ return GetPackedField<ComponentSizeShiftField>();
+ }
+
DECLARE_INSTRUCTION(NewArray);
protected:
DEFAULT_COPY_CONSTRUCTOR(NewArray);
+
+ private:
+ static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
+ static constexpr size_t kNumberOfNewArrayPackedBits =
+ kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
+ static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+ using ComponentSizeShiftField =
+ BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShift>;
};
class HAdd final : public HBinaryOperation {
@@ -5163,9 +5210,10 @@
class HDivZeroCheck final : public HExpression<1> {
public:
// `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException`
- // constructor.
+ // constructor. However, it can only do so on a fatal slow path, so execution never returns
+ // to the instruction following the current one; thus 'SideEffects::None()' is used.
HDivZeroCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(kDivZeroCheck, value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
+ : HExpression(kDivZeroCheck, value->GetType(), SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -5626,6 +5674,10 @@
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
+ // Return whether the conversion is implicit. This includes conversion to the same type.
+ bool IsImplicitConversion() const {
+ return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+ }
// Try to statically evaluate the conversion and return a HConstant
// containing the result. If the input cannot be converted, return nullptr.
@@ -5642,9 +5694,10 @@
class HNullCheck final : public HExpression<1> {
public:
// `HNullCheck` can trigger GC, as it may call the `NullPointerException`
- // constructor.
+ // constructor. However, it can only do so on a fatal slow path, so execution never returns
+ // to the instruction following the current one; thus 'SideEffects::None()' is used.
HNullCheck(HInstruction* value, uint32_t dex_pc)
- : HExpression(kNullCheck, value->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
+ : HExpression(kNullCheck, value->GetType(), SideEffects::None(), dex_pc) {
SetRawInputAt(0, value);
}
@@ -6071,12 +6124,13 @@
class HBoundsCheck final : public HExpression<2> {
public:
// `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException`
- // constructor.
+ // constructor. However, it can only do so on a fatal slow path, so execution never returns
+ // to the instruction following the current one; thus 'SideEffects::None()' is used.
HBoundsCheck(HInstruction* index,
HInstruction* length,
uint32_t dex_pc,
bool is_string_char_at = false)
- : HExpression(kBoundsCheck, index->GetType(), SideEffects::CanTriggerGC(), dex_pc) {
+ : HExpression(kBoundsCheck, index->GetType(), SideEffects::None(), dex_pc) {
DCHECK_EQ(DataType::Type::kInt32, DataType::Kind(index->GetType()));
SetPackedFlag<kFlagIsStringCharAt>(is_string_char_at);
SetRawInputAt(0, index);
@@ -6104,6 +6158,9 @@
private:
static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
+ static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
+ static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+ "Too many packed fields.");
};
class HSuspendCheck final : public HExpression<0> {
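
HNewArray above stores the component size shift (0-3 for 1/2/4/8-byte elements) in the instruction's spare packed bits through a BitField, and HBoundsCheck now asserts its own packed bits still fit. A minimal standalone sketch of that packing scheme follows, using plain shifts and masks with a hypothetical field position rather than ART's BitField template.

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kFieldShift = 2;   // hypothetical first free packed bit
    constexpr uint32_t kFieldSize = 2;    // MinimumBitsToStore(3) == 2 bits
    constexpr uint32_t kFieldMask = ((1u << kFieldSize) - 1u) << kFieldShift;

    // Store a small value inside a wider flags word without disturbing other bits.
    uint32_t SetComponentSizeShift(uint32_t packed, uint32_t value) {
      assert(value <= 3u);
      return (packed & ~kFieldMask) | (value << kFieldShift);
    }

    uint32_t GetComponentSizeShift(uint32_t packed) {
      return (packed & kFieldMask) >> kFieldShift;
    }
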
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index c7539f2..597e399 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -1021,6 +1021,66 @@
DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
};
+// Performs dot product of two vectors and adds the result to wider precision components in
+// the accumulator.
+//
+// viz. DOT_PRODUCT([ a1, .. , am], [ x1, .. , xn ], [ y1, .. , yn ]) =
+// [ a1 + sum(xi * yi), .. , am + sum(xj * yj) ],
+// for m <= n, non-overlapping sums,
+// for either both signed or both unsigned operands x, y.
+//
+// Notes:
+// - packed type reflects the type of sum reduction, not the type of the operands.
+// - IsZeroExtending() is used to determine the kind of signed/zero extension to be
+// performed for the operands.
+//
+// TODO: Support types other than kInt32 for packed type.
+class HVecDotProd final : public HVecOperation {
+ public:
+ HVecDotProd(ArenaAllocator* allocator,
+ HInstruction* accumulator,
+ HInstruction* left,
+ HInstruction* right,
+ DataType::Type packed_type,
+ bool is_zero_extending,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(kVecDotProd,
+ allocator,
+ packed_type,
+ SideEffects::None(),
+ /* number_of_inputs */ 3,
+ vector_length,
+ dex_pc) {
+ DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
+ DCHECK(DataType::IsIntegralType(packed_type));
+ DCHECK(left->IsVecOperation());
+ DCHECK(right->IsVecOperation());
+ DCHECK_EQ(ToSignedType(left->AsVecOperation()->GetPackedType()),
+ ToSignedType(right->AsVecOperation()->GetPackedType()));
+ SetRawInputAt(0, accumulator);
+ SetRawInputAt(1, left);
+ SetRawInputAt(2, right);
+ SetPackedFlag<kFieldHDotProdIsZeroExtending>(is_zero_extending);
+ }
+
+ bool IsZeroExtending() const { return GetPackedFlag<kFieldHDotProdIsZeroExtending>(); }
+
+ bool CanBeMoved() const override { return true; }
+
+ DECLARE_INSTRUCTION(VecDotProd);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecDotProd);
+
+ private:
+ // Additional packed bits.
+ static constexpr size_t kFieldHDotProdIsZeroExtending =
+ HVecOperation::kNumberOfVectorOpPackedBits;
+ static constexpr size_t kNumberOfHDotProdPackedBits = kFieldHDotProdIsZeroExtending + 1;
+ static_assert(kNumberOfHDotProdPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+};
+
// Loads a vector from memory, viz. load(mem, 1)
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad final : public HVecMemoryOperation {
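
As a scalar reference for the HVecDotProd semantics documented above (m accumulator lanes, n operand lanes, m <= n, non-overlapping sums), the sketch below shows one step with sixteen int8 operand lanes and four int32 accumulator lanes. The 4-to-1 lane grouping is an assumption chosen to match that ratio, not a statement about any particular backend instruction.

    #include <array>
    #include <cstdint>

    // One dot product step: acc[j] += sum over the j-th group of four operand lanes.
    std::array<int32_t, 4> VecDotProdStep(std::array<int32_t, 4> acc,
                                          const std::array<int8_t, 16>& x,
                                          const std::array<int8_t, 16>& y) {
      for (int j = 0; j < 4; ++j) {
        for (int k = 0; k < 4; ++k) {
          acc[j] += static_cast<int32_t>(x[4 * j + k]) * static_cast<int32_t>(y[4 * j + k]);
        }
      }
      return acc;
    }
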
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 142ddb5..4b0941b 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -84,14 +84,10 @@
return HDeadCodeElimination::kDeadCodeEliminationPassName;
case OptimizationPass::kInliner:
return HInliner::kInlinerPassName;
- case OptimizationPass::kSharpening:
- return HSharpening::kSharpeningPassName;
case OptimizationPass::kSelectGenerator:
return HSelectGenerator::kSelectGeneratorPassName;
case OptimizationPass::kInstructionSimplifier:
return InstructionSimplifier::kInstructionSimplifierPassName;
- case OptimizationPass::kIntrinsicsRecognizer:
- return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName;
case OptimizationPass::kCHAGuardOptimization:
return CHAGuardOptimization::kCHAGuardOptimizationPassName;
case OptimizationPass::kCodeSinking:
@@ -141,14 +137,12 @@
X(OptimizationPass::kInductionVarAnalysis);
X(OptimizationPass::kInliner);
X(OptimizationPass::kInstructionSimplifier);
- X(OptimizationPass::kIntrinsicsRecognizer);
X(OptimizationPass::kInvariantCodeMotion);
X(OptimizationPass::kLoadStoreAnalysis);
X(OptimizationPass::kLoadStoreElimination);
X(OptimizationPass::kLoopOptimization);
X(OptimizationPass::kScheduling);
X(OptimizationPass::kSelectGenerator);
- X(OptimizationPass::kSharpening);
X(OptimizationPass::kSideEffectsAnalysis);
#ifdef ART_ENABLE_CODEGEN_arm
X(OptimizationPass::kInstructionSimplifierArm);
@@ -264,18 +258,12 @@
pass_name);
break;
}
- case OptimizationPass::kSharpening:
- opt = new (allocator) HSharpening(graph, codegen, pass_name);
- break;
case OptimizationPass::kSelectGenerator:
opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name);
break;
case OptimizationPass::kInstructionSimplifier:
opt = new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name);
break;
- case OptimizationPass::kIntrinsicsRecognizer:
- opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name);
- break;
case OptimizationPass::kCHAGuardOptimization:
opt = new (allocator) CHAGuardOptimization(graph, pass_name);
break;
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 88b283c..ced383f 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -77,14 +77,12 @@
kInductionVarAnalysis,
kInliner,
kInstructionSimplifier,
- kIntrinsicsRecognizer,
kInvariantCodeMotion,
kLoadStoreAnalysis,
kLoadStoreElimination,
kLoopOptimization,
kScheduling,
kSelectGenerator,
- kSharpening,
kSideEffectsAnalysis,
#ifdef ART_ENABLE_CODEGEN_arm
kInstructionSimplifierArm,
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index be1f7ea..a52031c 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -128,7 +128,7 @@
public:
InternalCodeAllocator() {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return memory_.data();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0a74705..a95ddff 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -79,7 +79,7 @@
explicit CodeVectorAllocator(ArenaAllocator* allocator)
: memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return &memory_[0];
}
@@ -298,6 +298,7 @@
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger)
override
@@ -383,6 +384,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const;
@@ -399,7 +401,14 @@
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
- void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info)
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
+
+ void GenerateJitDebugInfo(ArtMethod* method,
+ const debug::MethodDebugInfo& method_debug_info)
REQUIRES_SHARED(Locks::mutator_lock_);
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -456,6 +465,48 @@
|| instruction_set == InstructionSet::kX86_64;
}
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+ case InstructionSet::kMips: {
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
+ }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case InstructionSet::kX86: {
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
+ }
+#endif
+ default:
+ UNUSED(graph);
+ UNUSED(codegen);
+ UNUSED(dex_compilation_unit);
+ UNUSED(pass_observer);
+ UNUSED(handles);
+ return false;
+ }
+}
+
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
@@ -623,8 +674,6 @@
OptimizationDef optimizations[] = {
// Initial optimizations.
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- OptDef(OptimizationPass::kSharpening),
OptDef(OptimizationPass::kConstantFolding),
OptDef(OptimizationPass::kInstructionSimplifier),
OptDef(OptimizationPass::kDeadCodeElimination,
@@ -739,6 +788,7 @@
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -848,6 +898,11 @@
MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
break;
}
+ case kAnalysisFailIrreducibleLoopAndStringInit: {
+ MaybeRecordStat(compilation_stats_.get(),
+ MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
+ break;
+ }
case kAnalysisSuccess:
UNREACHABLE();
}
@@ -856,11 +911,11 @@
}
}
- RunOptimizations(graph,
- codegen.get(),
- dex_compilation_unit,
- &pass_observer,
- handles);
+ if (baseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ } else {
+ RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ }
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
@@ -945,9 +1000,8 @@
}
OptimizationDef optimizations[] = {
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- // Some intrinsics are converted to HIR by the simplifier and the codegen also
- // has a few assumptions that only the instruction simplifier can satisfy.
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy.
OptDef(OptimizationPass::kInstructionSimplifier),
};
RunOptimizations(graph,
@@ -1038,7 +1092,8 @@
&code_allocator,
dex_compilation_unit,
method,
- /* osr */ false,
+ compiler_driver->GetCompilerOptions().IsBaseline(),
+ /* osr= */ false,
&handles));
}
}
@@ -1198,6 +1253,7 @@
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger) {
StackHandleScope<3> hs(self);
@@ -1219,7 +1275,7 @@
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
compiler_options, access_flags, method_idx, *dex_file);
- ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+ std::vector<Handle<mirror::Object>> roots;
ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
allocator.Adapter(kArenaAllocCHA));
ArenaStack arena_stack(runtime->GetJitArenaPool());
@@ -1312,6 +1368,7 @@
&code_allocator,
dex_compilation_unit,
method,
+ baseline,
osr,
&handles));
if (codegen.get() == nullptr) {
@@ -1321,19 +1378,6 @@
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
size_t number_of_roots = codegen->GetNumberOfJitRoots();
- // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
- // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
- // executed, this array is not needed.
- Handle<mirror::ObjectArray<mirror::Object>> roots(
- hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
- self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(), number_of_roots)));
- if (roots == nullptr) {
- // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- return false;
- }
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
@@ -1347,7 +1391,14 @@
return false;
}
memcpy(stack_map_data, stack_map.data(), stack_map.size());
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
+ std::vector<Handle<mirror::Object>> roots;
+ codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
+ // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
+ DCHECK(std::all_of(roots.begin(),
+ roots.end(),
+ [&handles](Handle<mirror::Object> root){
+ return handles.Contains(root.GetReference());
+ }));
const void* code = code_cache->CommitCode(
self,
@@ -1413,7 +1464,8 @@
return true;
}
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) {
+void OptimizingCompiler::GenerateJitDebugInfo(
+ ArtMethod* method, const debug::MethodDebugInfo& info) {
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
DCHECK(compiler_options.GenerateAnyDebugInfo());
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f..1f4f6d5 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -59,6 +59,7 @@
kNotCompiledUnsupportedIsa,
kNotCompiledVerificationError,
kNotCompiledVerifyAtRuntime,
+ kNotCompiledIrreducibleLoopAndStringInit,
kInlinedMonomorphicCall,
kInlinedPolymorphicCall,
kMonomorphicCall,
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 399a6d8..a8ab6cd 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -174,8 +174,8 @@
template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true;
template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false;
-typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>
- ParallelMoveResolverTestTypes;
+using ParallelMoveResolverTestTypes =
+ ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>;
TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 4b07d5b..4ff293c 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -17,7 +17,6 @@
#include "pc_relative_fixups_x86.h"
#include "code_generator_x86.h"
#include "intrinsics_x86.h"
-#include "runtime.h"
namespace art {
namespace x86 {
@@ -239,7 +238,7 @@
case Intrinsics::kIntegerValueOf:
// This intrinsic can be call free if it loads the address of the boot image object.
// If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
- if (Runtime::Current()->UseJitCompilation()) {
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
break;
}
FALLTHROUGH_INTENDED;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740..12db8a0 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -304,4 +304,13 @@
return true;
}
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+ // For simplicity, our code generators don't handle implicit type conversion, so ensure
+ // there are none before hitting codegen.
+ if (instruction->IsImplicitConversion()) {
+ instruction->ReplaceWith(instruction->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
} // namespace art
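
A small worked case for the new VisitTypeConversion above: per IsImplicitConversion() in nodes.h, a conversion to the same type is (at minimum) implicit, so an int32-to-int32 HTypeConversion is replaced by its input and removed before register allocation. The predicate below is only a rough stand-in for DataType::IsTypeConversionImplicit, which covers more cases than same-type conversions.

    enum class Type { kInt8, kInt16, kInt32, kInt64, kFloat32, kFloat64 };

    // Minimal stand-in: a conversion that cannot change the value's representation
    // can be dropped. Same-type conversions are the obvious subset.
    bool IsImplicitConversionSketch(Type input, Type result) {
      return input == result;
    }

In the pass itself, the matching node is rewired with ReplaceWith(GetInput()) and removed from its block, exactly as the hunk above shows.
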
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256..e0bb76e 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@
void VisitConstructorFence(HConstructorFence* constructor_fence) override;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a9d5902..9079658 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -114,9 +114,9 @@
void VisitCheckCast(HCheckCast* instr) override;
void VisitBoundType(HBoundType* instr) override;
void VisitNullCheck(HNullCheck* instr) override;
- void VisitPhi(HPhi* phi);
+ void VisitPhi(HPhi* phi) override;
- void VisitBasicBlock(HBasicBlock* block);
+ void VisitBasicBlock(HBasicBlock* block) override;
void ProcessWorklist();
private:
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index db6a760..be5304c 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -872,9 +872,9 @@
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 6));
locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -895,9 +895,9 @@
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 3));
locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 5c2f57e..c864951 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -35,22 +35,6 @@
namespace art {
-bool HSharpening::Run() {
- // We don't care about the order of the blocks here.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInvokeStaticOrDirect()) {
- SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
- }
- // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
- // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
- // because we know the type better when inlining.
- }
- }
- return true;
-}
-
static bool IsInBootImage(ArtMethod* method) {
const std::vector<gc::space::ImageSpace*>& image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
@@ -72,17 +56,14 @@
return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
-void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen) {
- if (invoke->IsStringInit()) {
- // Not using the dex cache arrays. But we could still try to use a better dispatch...
- // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- return;
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below.
+ DCHECK(callee != nullptr);
+ DCHECK(!(callee->IsConstructor() && callee->GetDeclaringClass()->IsStringClass()));
}
- ArtMethod* callee = invoke->GetResolvedMethod();
- DCHECK(callee != nullptr);
-
HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
uint64_t method_load_data = 0u;
@@ -141,9 +122,7 @@
HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
method_load_kind, code_ptr_location, method_load_data
};
- HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
- invoke->SetDispatchInfo(dispatch_info);
+ return codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, callee);
}
HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index dc55eea..b818672 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -25,24 +25,13 @@
class CodeGenerator;
class DexCompilationUnit;
-// Optimization that tries to improve the way we dispatch methods and access types,
-// fields, etc. Besides actual method sharpening based on receiver type (for example
-// virtual->direct), this includes selecting the best available dispatch for
-// invoke-static/-direct based on code generator support.
-class HSharpening : public HOptimization {
+// Utility methods that try to improve the way we dispatch methods, and access
+// types and strings.
+class HSharpening {
public:
- HSharpening(HGraph* graph,
- CodeGenerator* codegen,
- const char* name = kSharpeningPassName)
- : HOptimization(graph, name),
- codegen_(codegen) { }
-
- bool Run() override;
-
- static constexpr const char* kSharpeningPassName = "sharpening";
-
- // Used by Sharpening and InstructionSimplifier.
- static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+ // Used by the builder and InstructionSimplifier.
+ static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen);
// Used by the builder and the inliner.
static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
@@ -61,9 +50,6 @@
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
-
- private:
- CodeGenerator* codegen_;
};
} // namespace art
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 9731712..4b0be07 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -202,6 +202,7 @@
EXPECT_TRUE(depends_on_gc.MayDependOn(all_changes));
EXPECT_TRUE(depends_on_gc.Union(can_trigger_gc).MayDependOn(all_changes));
EXPECT_FALSE(can_trigger_gc.MayDependOn(all_changes));
+ EXPECT_FALSE(can_trigger_gc.MayDependOn(can_trigger_gc));
EXPECT_TRUE(all_changes.Includes(can_trigger_gc));
EXPECT_FALSE(all_changes.Includes(depends_on_gc));
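
The added expectation encodes the rule spelled out in the new nodes.h comments: an instruction that can trigger GC does not, by itself, depend on another GC-triggering instruction; only a DependsOnGC side effect depends on CanTriggerGC. A minimal sketch of that asymmetry with hypothetical bit positions, not ART's 38-bit SideEffects encoding:

    #include <cstdint>

    constexpr uint64_t kCanTriggerGCBit = 1u << 0;  // hypothetical positions
    constexpr uint64_t kDependsOnGCBit  = 1u << 1;

    // "this" reads state that GC may move; "other" may cause a GC. The dependency
    // is one-directional: CanTriggerGC alone never depends on CanTriggerGC.
    bool MayDependOnGC(uint64_t this_flags, uint64_t other_flags) {
      return (this_flags & kDependsOnGCBit) != 0 && (other_flags & kCanTriggerGCBit) != 0;
    }
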
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index dda29a1..16c23c8 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -16,6 +16,8 @@
#include "ssa_builder.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "mirror/class-inl.h"
@@ -415,85 +417,36 @@
return true;
}
-static bool HasAliasInEnvironments(HInstruction* instruction) {
- HEnvironment* last_user = nullptr;
+bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) {
+ ScopedArenaHashSet<size_t> seen_users(
+ local_allocator_->Adapter(kArenaAllocGraphBuilder));
for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
DCHECK(use.GetUser() != nullptr);
- // Note: The first comparison (== null) always fails.
- if (use.GetUser() == last_user) {
+ size_t id = use.GetUser()->GetHolder()->GetId();
+ if (seen_users.find(id) != seen_users.end()) {
return true;
}
- last_user = use.GetUser();
- }
-
- if (kIsDebugBuild) {
- // Do a quadratic search to ensure same environment uses are next
- // to each other.
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) {
- auto next = current;
- for (++next; next != end; ++next) {
- DCHECK(next->GetUser() != current->GetUser());
- }
- }
+ seen_users.insert(id);
}
return false;
}
-void SsaBuilder::ReplaceUninitializedStringPhis() {
- ScopedArenaHashSet<HInstruction*> seen_instructions(
- local_allocator_->Adapter(kArenaAllocGraphBuilder));
- ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
-
- // Iterate over all inputs and uses of the phi, recursively, until all related instructions
- // have been visited.
- for (const auto& pair : uninitialized_string_phis_) {
- HPhi* string_phi = pair.first;
- HInvoke* invoke = pair.second;
- worklist.push_back(string_phi);
- HNewInstance* found_instance = nullptr;
- do {
- HInstruction* current = worklist.back();
- worklist.pop_back();
- if (seen_instructions.find(current) != seen_instructions.end()) {
- continue;
- }
- seen_instructions.insert(current);
- if (current->IsNewInstance()) {
- // If it is the first time we see the allocation, replace its uses. We don't register
- // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
- // aliasing and environment uses that don't hold when the string escapes to phis.
- // Note that this also means we will keep the (useless) allocation.
- if (found_instance == nullptr) {
- found_instance = current->AsNewInstance();
- } else {
- DCHECK(found_instance == current);
- }
- } else if (current->IsPhi()) {
- // Push all inputs to the worklist. Those should be Phis or NewInstance.
- for (HInstruction* input : current->GetInputs()) {
- DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName();
- worklist.push_back(input);
- }
- } else {
- // The verifier prevents any other DEX uses of the uninitialized string.
- DCHECK(current->IsEqual() || current->IsNotEqual());
- continue;
- }
- current->ReplaceUsesDominatedBy(invoke, invoke);
- current->ReplaceEnvUsesDominatedBy(invoke, invoke);
- // Push all users to the worklist. Now that we have replaced
- // the uses dominated by the invokes, the remaining users should only
- // be Phi, or Equal/NotEqual.
- for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
- HInstruction* user = use.GetUser();
- DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName();
- worklist.push_back(user);
- }
- } while (!worklist.empty());
- seen_instructions.clear();
- DCHECK(found_instance != nullptr);
+bool SsaBuilder::ReplaceUninitializedStringPhis() {
+ for (HInvoke* invoke : uninitialized_string_phis_) {
+ HInstruction* str = invoke->InputAt(invoke->InputCount() - 1);
+ if (str->IsPhi()) {
+ // If after redundant phi and dead phi elimination, it's still a phi that feeds
+ // the invoke, then we must be compiling a method with irreducible loops. Just bail.
+ DCHECK(graph_->HasIrreducibleLoops());
+ return false;
+ }
+ DCHECK(str->IsNewInstance());
+ AddUninitializedString(str->AsNewInstance());
+ str->ReplaceUsesDominatedBy(invoke, invoke);
+ str->ReplaceEnvUsesDominatedBy(invoke, invoke);
+ invoke->RemoveInputAt(invoke->InputCount() - 1);
}
+ return true;
}
void SsaBuilder::RemoveRedundantUninitializedStrings() {
@@ -508,8 +461,9 @@
DCHECK(new_instance->IsStringAlloc());
// Replace NewInstance of String with NullConstant if not used prior to
- // calling StringFactory. In case of deoptimization, the interpreter is
- // expected to skip null check on the `this` argument of the StringFactory call.
+ // calling StringFactory. We check for alias environments in case of deoptimization.
+ // The interpreter is expected to skip null check on the `this` argument of the
+ // StringFactory call.
if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
new_instance->ReplaceWith(graph_->GetNullConstant());
new_instance->GetBlock()->RemoveInstruction(new_instance);
@@ -544,11 +498,6 @@
GraphAnalysisResult SsaBuilder::BuildSsa() {
DCHECK(!graph_->IsInSsaForm());
- // Replace Phis that feed in a String.<init>, as well as their aliases, with
- // the actual String allocation invocation. We do this first, as the phis stored in
- // the data structure might get removed from the graph in later stages during `BuildSsa`.
- ReplaceUninitializedStringPhis();
-
// Propagate types of phis. At this point, phis are typed void in the general
// case, or float/double/reference if we created an equivalent phi. So we need
// to propagate the types across phis to give them a correct type. If a type
@@ -607,6 +556,14 @@
// input types.
dead_phi_elimimation.EliminateDeadPhis();
+ // Replace Phis that feed into a String.<init> during instruction building. We run this
+ // after redundant and dead phi elimination to make sure the phi will have been replaced
+ // by the actual allocation. Only with an irreducible loop can a phi still be the input,
+ // in which case we bail.
+ if (!ReplaceUninitializedStringPhis()) {
+ return kAnalysisFailIrreducibleLoopAndStringInit;
+ }
+
// HInstructionBuilder replaced uses of NewInstances of String with the
// results of their corresponding StringFactory calls. Unless the String
// objects are used before they are initialized, they can be replaced with
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 7655445..bb892c9 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -97,8 +97,8 @@
}
}
- void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
- uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+ void AddUninitializedStringPhi(HInvoke* invoke) {
+ uninitialized_string_phis_.push_back(invoke);
}
private:
@@ -123,7 +123,8 @@
HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
void RemoveRedundantUninitializedStrings();
- void ReplaceUninitializedStringPhis();
+ bool ReplaceUninitializedStringPhis();
+ bool HasAliasInEnvironments(HInstruction* instruction);
HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
@@ -137,7 +138,7 @@
ScopedArenaVector<HArrayGet*> ambiguous_agets_;
ScopedArenaVector<HArraySet*> ambiguous_asets_;
ScopedArenaVector<HNewInstance*> uninitialized_strings_;
- ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
+ ScopedArenaVector<HInvoke*> uninitialized_string_phis_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c0b6f98..a673e32 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -179,7 +179,7 @@
return;
}
- typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+ using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
const std::vector<uint8_t>& old_stream = data.first;
const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -3610,7 +3610,7 @@
label->LinkTo(branch_id);
}
// Reserve space for the branch.
- while (length--) {
+ for (; length != 0u; --length) {
Nop();
}
}
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index f9919f5..98fc44b 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -38,12 +38,12 @@
uint32_t,
mips::VectorRegister> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t,
- mips::VectorRegister> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t,
+ mips::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -229,7 +229,7 @@
STLDeleteElements(&vec_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 1ec7a6a..723c489 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -38,12 +38,12 @@
uint32_t,
mips::VectorRegister> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t,
- mips::VectorRegister> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t,
+ mips::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -242,7 +242,7 @@
STLDeleteElements(&vec_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 9527fa6..4f8ccee 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -37,11 +37,11 @@
mips::FRegister,
uint32_t> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -176,7 +176,7 @@
STLDeleteElements(&fp_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 5b1c5d9..29d2bed 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -52,7 +52,7 @@
return;
}
- typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+ using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
const std::vector<uint8_t>& old_stream = data.first;
const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -2889,7 +2889,7 @@
label->LinkTo(branch_id);
}
// Reserve space for the branch.
- while (length--) {
+ for (; length != 0u; --length) {
Nop();
}
}
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 4ceb356..66711c3 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -41,12 +41,12 @@
uint32_t,
mips64::VectorRegister> {
public:
- typedef AssemblerTest<mips64::Mips64Assembler,
- mips64::Mips64Label,
- mips64::GpuRegister,
- mips64::FpuRegister,
- uint32_t,
- mips64::VectorRegister> Base;
+ using Base = AssemblerTest<mips64::Mips64Assembler,
+ mips64::Mips64Label,
+ mips64::GpuRegister,
+ mips64::FpuRegister,
+ uint32_t,
+ mips64::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -240,7 +240,7 @@
STLDeleteElements(&vec_registers_);
}
- std::vector<mips64::Mips64Label> GetAddresses() {
+ std::vector<mips64::Mips64Label> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index b03c40a..ad75174 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -44,11 +44,11 @@
x86::XmmRegister,
x86::Immediate> {
public:
- typedef AssemblerTest<x86::X86Assembler,
- x86::Address,
- x86::Register,
- x86::XmmRegister,
- x86::Immediate> Base;
+ using Base = AssemblerTest<x86::X86Assembler,
+ x86::Address,
+ x86::Register,
+ x86::XmmRegister,
+ x86::Immediate>;
protected:
std::string GetArchitectureString() override {
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index e1de1f1..fe42f9b 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -137,11 +137,11 @@
x86_64::XmmRegister,
x86_64::Immediate> {
public:
- typedef AssemblerTest<x86_64::X86_64Assembler,
- x86_64::Address,
- x86_64::CpuRegister,
- x86_64::XmmRegister,
- x86_64::Immediate> Base;
+ using Base = AssemblerTest<x86_64::X86_64Assembler,
+ x86_64::Address,
+ x86_64::CpuRegister,
+ x86_64::XmmRegister,
+ x86_64::Immediate>;
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
@@ -297,7 +297,7 @@
STLDeleteElements(&fp_registers_);
}
- std::vector<x86_64::Address> GetAddresses() {
+ std::vector<x86_64::Address> GetAddresses() override {
return addresses_;
}
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 136066d..e1b23cc 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -49,10 +49,9 @@
void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
- bool IsRelocationPossible() override { return false; }
verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
- void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
+ void SetVerifierDeps(verifier::VerifierDeps* deps) override { deps_ = deps; }
private:
verifier::VerifierDeps* deps_;
@@ -60,7 +59,7 @@
class VerifierDepsTest : public CommonCompilerTest {
public:
- void SetUpRuntimeOptions(RuntimeOptions* options) {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonCompilerTest::SetUpRuntimeOptions(options);
callbacks_.reset(new VerifierDepsCompilerCallbacks());
}
@@ -178,7 +177,8 @@
true /* allow_soft_failures */,
true /* need_precise_constants */,
false /* verify to dump */,
- true /* allow_thread_suspension */);
+ true /* allow_thread_suspension */,
+ 0 /* api_level */);
verifier.Verify();
soa.Self()->SetVerifierDeps(nullptr);
has_failures = verifier.HasFailures();
@@ -423,7 +423,7 @@
return verifier_deps_->dex_deps_.size();
}
- size_t HasEachKindOfRecord() {
+ bool HasEachKindOfRecord() {
bool has_strings = false;
bool has_assignability = false;
bool has_classes = false;
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 88e69cd..666db42 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -89,6 +89,20 @@
},
}
+cc_defaults {
+ name: "libart-dex2oat_static_base_defaults",
+ target: {
+ android: {
+ static_libs: ["libcutils"],
+ },
+ },
+ static_libs: [
+ "libbase",
+ "liblz4",
+ "liblzma",
+ ],
+}
+
gensrcs {
name: "art_dex2oat_operator_srcs",
cmd: "$(location generate_operator_out) art/dex2oat $(in) > $(out)",
@@ -110,6 +124,20 @@
],
}
+cc_defaults {
+ name: "libart-dex2oat_static_defaults",
+ defaults: [
+ "libart-dex2oat_static_base_defaults",
+ "libart_static_defaults",
+ "libprofile_static_defaults",
+ ],
+ static_libs: [
+ "libart-compiler",
+ "libart-dexlayout",
+ "libart-dex2oat",
+ ],
+}
+
art_cc_static_library {
name: "libartd-dex2oat",
defaults: [
@@ -124,6 +152,20 @@
],
}
+cc_defaults {
+ name: "libartd-dex2oat_static_defaults",
+ defaults: [
+ "libart-dex2oat_static_base_defaults",
+ "libartd_static_defaults",
+ "libprofiled_static_defaults",
+ ],
+ static_libs: [
+ "libartd-compiler",
+ "libartd-dexlayout",
+ "libartd-dex2oat",
+ ],
+}
+
cc_library_headers {
name: "dex2oat_headers",
host_supported: true,
@@ -255,7 +297,9 @@
name: "dex2oats-defaults",
device_supported: false,
static_executable: true,
- defaults: ["dex2oat-defaults"],
+ defaults: [
+ "dex2oat-defaults",
+ ],
target: {
darwin: {
enabled: false,
@@ -269,22 +313,24 @@
// Try to get rid of it.
"-z muldefs",
],
- static_libs: art_static_dependencies,
+ static_libs: [
+ "libbase",
+ "liblz4",
+ "libsigchain_dummy",
+ ],
}
art_cc_binary {
name: "dex2oats",
- defaults: ["dex2oats-defaults"],
- static_libs: [
- "libart-dex2oat",
- "libart-compiler",
- "libart-dexlayout",
- "libart",
- "libartbase",
- "libdexfile",
- "libprofile",
- "libvixl-arm",
- "libvixl-arm64",
+ defaults: [
+ "dex2oats-defaults",
+ "libart_static_defaults",
+ "libart-compiler_static_defaults",
+ "libart-dexlayout_static_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ "libart-dex2oat_static_defaults",
],
}
@@ -293,6 +339,13 @@
defaults: [
"art_debug_defaults",
"dex2oats-defaults",
+ "libartd_static_defaults",
+ "libartd-compiler_static_defaults",
+ "libartd-dexlayout_static_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ "libartd-dex2oat_static_defaults",
],
target: {
linux_glibc_x86_64: {
@@ -301,17 +354,6 @@
},
// b/79417743, oatdump 32-bit tests failed with clang lld
use_clang_lld: false,
- static_libs: [
- "libartd-dex2oat",
- "libartd-compiler",
- "libartd-dexlayout",
- "libartd",
- "libartbased",
- "libprofiled",
- "libdexfiled",
- "libvixld-arm",
- "libvixld-arm64",
- ],
}
art_cc_test {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 5655b3c..f0f2b3e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -296,9 +296,6 @@
UsageError(" Example: --instruction-set-features=div");
UsageError(" Default: default");
UsageError("");
- UsageError(" --compile-pic: Force indirect use of code, methods, and classes");
- UsageError(" Default: disabled for apps (ignored for boot image which is always PIC)");
- UsageError("");
UsageError(" --compiler-backend=(Quick|Optimizing): select compiler backend");
UsageError(" set.");
UsageError(" Example: --compiler-backend=Optimizing");
@@ -419,8 +416,7 @@
UsageError(" --app-image-file=<file-name>: specify a file name for app image.");
UsageError(" Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art");
UsageError("");
- UsageError(" --multi-image: specify that separate oat and image files be generated for each "
- "input dex file.");
+ UsageError(" --multi-image: obsolete, ignored");
UsageError("");
UsageError(" --force-determinism: force the compiler to emit a deterministic output.");
UsageError("");
@@ -481,6 +477,9 @@
UsageError(" compiling the apk. If specified, the string will be embedded verbatim in");
UsageError(" the key value store of the oat file.");
UsageError("");
+ UsageError(" --resolve-startup-const-strings=true|false: If true, the compiler eagerly");
+ UsageError(" resolves strings referenced from const-string of startup methods.");
+ UsageError("");
UsageError(" Example: --compilation-reason=install");
UsageError("");
std::cerr << "See log for usage error information\n";
@@ -590,8 +589,10 @@
const char* reason = "dex2oat watch dog thread waiting";
CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
while (!shutting_down_) {
- int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts));
- if (rc == ETIMEDOUT) {
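+ // pthread_cond_timedwait() reports errors through its return value rather than
+ // errno, so TEMP_FAILURE_RETRY() does not apply; retry explicitly on EINTR.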
+ int rc = pthread_cond_timedwait(&cond_, &mutex_, &timeout_ts);
+ if (rc == EINTR) {
+ continue;
+ } else if (rc == ETIMEDOUT) {
Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
timeout_in_milliseconds_/1000));
} else if (rc != 0) {
@@ -619,8 +620,6 @@
compiler_kind_(Compiler::kOptimizing),
// Take the default set of instruction features from the build.
image_file_location_oat_checksum_(0),
- image_file_location_oat_data_begin_(0),
- image_patch_delta_(0),
key_value_store_(nullptr),
verification_results_(nullptr),
runtime_(nullptr),
@@ -639,7 +638,6 @@
image_storage_mode_(ImageHeader::kStorageModeUncompressed),
passes_to_run_filename_(nullptr),
dirty_image_objects_filename_(nullptr),
- multi_image_(false),
is_host_(false),
elf_writers_(),
oat_writers_(),
@@ -664,21 +662,21 @@
if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
// We want to just exit on non-debug builds, not bringing the runtime down
// in an orderly fashion. So release the following fields.
- driver_.release();
- image_writer_.release();
+ driver_.release(); // NOLINT
+ image_writer_.release(); // NOLINT
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
new std::vector<MemMap>(std::move(opened_dex_files_maps_)); // Leak MemMaps.
for (std::unique_ptr<File>& vdex_file : vdex_files_) {
- vdex_file.release();
+ vdex_file.release(); // NOLINT
}
for (std::unique_ptr<File>& oat_file : oat_files_) {
- oat_file.release();
+ oat_file.release(); // NOLINT
}
- runtime_.release();
- verification_results_.release();
- key_value_store_.release();
+ runtime_.release(); // NOLINT
+ verification_results_.release(); // NOLINT
+ key_value_store_.release(); // NOLINT
}
}
@@ -729,10 +727,8 @@
}
void ProcessOptions(ParserOptions* parser_options) {
+ compiler_options_->compile_pic_ = true; // All AOT compilation is PIC.
compiler_options_->boot_image_ = !image_filenames_.empty();
- if (compiler_options_->boot_image_) {
- compiler_options_->compile_pic_ = true;
- }
compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
if (IsBootImage() && image_filenames_.size() == 1) {
@@ -921,20 +917,6 @@
break;
}
- if (!IsBootImage() && multi_image_) {
- Usage("--multi-image can only be used when creating boot images");
- }
- if (IsBootImage() && multi_image_ && image_filenames_.size() > 1) {
- Usage("--multi-image cannot be used with multiple image names");
- }
-
- // For now, if we're on the host and compile the boot image, *always* use multiple image files.
- if (!kIsTargetBuild && IsBootImage()) {
- if (image_filenames_.size() == 1) {
- multi_image_ = true;
- }
- }
-
// Done with usage checks, enable watchdog if requested
if (parser_options->watch_dog_enabled) {
int64_t timeout = parser_options->watch_dog_timeout_in_ms > 0
@@ -980,7 +962,7 @@
std::string base_oat = oat_filenames_[0];
size_t last_oat_slash = base_oat.rfind('/');
if (last_oat_slash == std::string::npos) {
- Usage("--multi-image used with unusable oat filename %s", base_oat.c_str());
+ Usage("Unusable boot image oat filename %s", base_oat.c_str());
}
// We also need to honor path components that were encoded through '@'. Otherwise the loading
// code won't be able to find the images.
@@ -992,7 +974,7 @@
std::string base_img = image_filenames_[0];
size_t last_img_slash = base_img.rfind('/');
if (last_img_slash == std::string::npos) {
- Usage("--multi-image used with unusable image filename %s", base_img.c_str());
+ Usage("Unusable boot image filename %s", base_img.c_str());
}
// We also need to honor path components that were encoded through '@'. Otherwise the loading
// code won't be able to find the images.
@@ -1012,31 +994,12 @@
base_img = base_img.substr(0, last_img_slash + 1);
- // Note: we have some special case here for our testing. We have to inject the differentiating
- // parts for the different core images.
- std::string infix; // Empty infix by default.
- {
- // Check the first name.
- std::string dex_file = oat_filenames_[0];
- size_t last_dex_slash = dex_file.rfind('/');
- if (last_dex_slash != std::string::npos) {
- dex_file = dex_file.substr(last_dex_slash + 1);
- }
- size_t last_dex_dot = dex_file.rfind('.');
- if (last_dex_dot != std::string::npos) {
- dex_file = dex_file.substr(0, last_dex_dot);
- }
- if (android::base::StartsWith(dex_file, "core-")) {
- infix = dex_file.substr(strlen("core"));
- }
- }
-
std::string base_symbol_oat;
if (!oat_unstripped_.empty()) {
base_symbol_oat = oat_unstripped_[0];
size_t last_symbol_oat_slash = base_symbol_oat.rfind('/');
if (last_symbol_oat_slash == std::string::npos) {
- Usage("--multi-image used with unusable symbol filename %s", base_symbol_oat.c_str());
+ Usage("Unusable boot image symbol filename %s", base_symbol_oat.c_str());
}
base_symbol_oat = base_symbol_oat.substr(0, last_symbol_oat_slash + 1);
}
@@ -1044,11 +1007,11 @@
// Now create the other names. Use a counted loop to skip the first one.
for (size_t i = 1; i < dex_locations_.size(); ++i) {
// TODO: Make everything properly std::string.
- std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".art");
+ std::string image_name = CreateMultiImageName(dex_locations_[i], prefix, ".art");
char_backing_storage_.push_front(base_img + image_name);
image_filenames_.push_back(char_backing_storage_.front().c_str());
- std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, infix, ".oat");
+ std::string oat_name = CreateMultiImageName(dex_locations_[i], prefix, ".oat");
char_backing_storage_.push_front(base_oat + oat_name);
oat_filenames_.push_back(char_backing_storage_.front().c_str());
@@ -1063,11 +1026,9 @@
// 0) Assume input is /a/b/c.d
// 1) Strip the path -> c.d
// 2) Inject prefix p -> pc.d
- // 3) Inject infix i -> pci.d
- // 4) Replace suffix with s if it's "jar" -> d == "jar" -> pci.s
+ // 3) Replace suffix with s if it's "jar" -> d == "jar" -> pc.s
static std::string CreateMultiImageName(std::string in,
const std::string& prefix,
- const std::string& infix,
const char* replace_suffix) {
size_t last_dex_slash = in.rfind('/');
if (last_dex_slash != std::string::npos) {
@@ -1076,13 +1037,6 @@
if (!prefix.empty()) {
in = prefix + in;
}
- if (!infix.empty()) {
- // Inject infix.
- size_t last_dot = in.rfind('.');
- if (last_dot != std::string::npos) {
- in.insert(last_dot, infix);
- }
- }
if (android::base::EndsWith(in, ".jar")) {
in = in.substr(0, in.length() - strlen(".jar")) +
(replace_suffix != nullptr ? replace_suffix : "");
@@ -1105,9 +1059,6 @@
oss << kRuntimeISA;
key_value_store_->Put(OatHeader::kDex2OatHostKey, oss.str());
key_value_store_->Put(
- OatHeader::kPicKey,
- compiler_options_->compile_pic_ ? OatHeader::kTrueValue : OatHeader::kFalseValue);
- key_value_store_->Put(
OatHeader::kDebuggableKey,
compiler_options_->debuggable_ ? OatHeader::kTrueValue : OatHeader::kFalseValue);
key_value_store_->Put(
@@ -1232,7 +1183,6 @@
AssignTrueIfExists(args, M::Host, &is_host_);
AssignTrueIfExists(args, M::AvoidStoringInvocation, &avoid_storing_invocation_);
- AssignTrueIfExists(args, M::MultiImage, &multi_image_);
AssignIfExists(args, M::CopyDexFiles, ©_dex_files_);
if (args.Exists(M::ForceDeterminism)) {
@@ -1296,7 +1246,7 @@
PruneNonExistentDexFiles();
// Expand oat and image filenames for multi image.
- if (IsBootImage() && multi_image_) {
+ if (IsBootImage() && image_filenames_.size() == 1) {
ExpandOatAndImageFilenames();
}
@@ -1348,12 +1298,12 @@
}
}
} else {
- std::unique_ptr<File> oat_file(new File(oat_fd_, oat_location_, /* check_usage */ true));
- if (oat_file == nullptr) {
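+ // DupCloexec() gives this File its own descriptor to own and close; the
+ // caller-supplied oat_fd_ stays open (replaces the old DisableAutoClose()).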
+ std::unique_ptr<File> oat_file(
+ new File(DupCloexec(oat_fd_), oat_location_, /* check_usage */ true));
+ if (!oat_file->IsOpened()) {
PLOG(ERROR) << "Failed to create oat file: " << oat_location_;
return false;
}
- oat_file->DisableAutoClose();
if (oat_file->SetLength(0) != 0) {
PLOG(WARNING) << "Truncating oat file " << oat_location_ << " failed.";
oat_file->Erase();
@@ -1385,12 +1335,12 @@
DCHECK_NE(output_vdex_fd_, -1);
std::string vdex_location = ReplaceFileExtension(oat_location_, "vdex");
- std::unique_ptr<File> vdex_file(new File(output_vdex_fd_, vdex_location, /* check_usage */ true));
- if (vdex_file == nullptr) {
+ std::unique_ptr<File> vdex_file(new File(
+ DupCloexec(output_vdex_fd_), vdex_location, /* check_usage */ true));
+ if (!vdex_file->IsOpened()) {
PLOG(ERROR) << "Failed to create vdex file: " << vdex_location;
return false;
}
- vdex_file->DisableAutoClose();
if (input_vdex_file_ != nullptr && output_vdex_fd_ == input_vdex_fd_) {
update_input_vdex_ = true;
} else {
@@ -1472,10 +1422,7 @@
PLOG(ERROR) << "Failed to create swap file: " << swap_file_name_;
return false;
}
- swap_fd_ = swap_file->Fd();
- swap_file->MarkUnchecked(); // We don't we to track this, it will be unlinked immediately.
- swap_file->DisableAutoClose(); // We'll handle it ourselves, the File object will be
- // released immediately.
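+ // Release() transfers ownership of the underlying fd to swap_fd_, so the
+ // temporary File object no longer closes it on destruction.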
+ swap_fd_ = swap_file->Release();
unlink(swap_file_name_.c_str());
}
@@ -1566,9 +1513,6 @@
std::vector<gc::space::ImageSpace*> image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
image_file_location_oat_checksum_ = image_spaces[0]->GetImageHeader().GetOatChecksum();
- image_file_location_oat_data_begin_ =
- reinterpret_cast<uintptr_t>(image_spaces[0]->GetImageHeader().GetOatDataBegin());
- image_patch_delta_ = image_spaces[0]->GetImageHeader().GetPatchDelta();
// Store the boot image filename(s).
std::vector<std::string> image_filenames;
for (const gc::space::ImageSpace* image_space : image_spaces) {
@@ -1580,8 +1524,6 @@
}
} else {
image_file_location_oat_checksum_ = 0u;
- image_file_location_oat_data_begin_ = 0u;
- image_patch_delta_ = 0;
}
// Open dex files for class path.
@@ -2028,7 +1970,6 @@
image_writer_.reset(new linker::ImageWriter(*compiler_options_,
image_base_,
- compiler_options_->GetCompilePic(),
IsAppImage(),
image_storage_mode_,
oat_filenames_,
@@ -2151,10 +2092,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer->WriteHeader(elf_writer->GetStream(),
- image_file_location_oat_checksum_,
- image_file_location_oat_data_begin_,
- image_patch_delta_)) {
+ if (!oat_writer->WriteHeader(elf_writer->GetStream(), image_file_location_oat_checksum_)) {
LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
return false;
}
@@ -2654,32 +2592,9 @@
for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
oat_data_begins.push_back(image_writer_->GetOatDataBegin(i));
}
- // Destroy ImageWriter before doing FixupElf.
+ // Destroy ImageWriter.
image_writer_.reset();
- for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
- const char* oat_filename = oat_filenames_[i];
- // Do not fix up the ELF file if we are --compile-pic or compiling the app image
- if (!compiler_options_->GetCompilePic() && IsBootImage()) {
- std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
- if (oat_file.get() == nullptr) {
- PLOG(ERROR) << "Failed to open ELF file: " << oat_filename;
- return false;
- }
-
- if (!linker::ElfWriter::Fixup(oat_file.get(), oat_data_begins[i])) {
- oat_file->Erase();
- LOG(ERROR) << "Failed to fixup ELF file " << oat_file->GetPath();
- return false;
- }
-
- if (oat_file->FlushCloseOrErase()) {
- PLOG(ERROR) << "Failed to flush and close fixed ELF file " << oat_file->GetPath();
- return false;
- }
- }
- }
-
return true;
}
@@ -2805,8 +2720,6 @@
Compiler::Kind compiler_kind_;
uint32_t image_file_location_oat_checksum_;
- uintptr_t image_file_location_oat_data_begin_;
- int32_t image_patch_delta_;
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
std::unique_ptr<VerificationResults> verification_results_;
@@ -2855,7 +2768,6 @@
const char* dirty_image_objects_filename_;
std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
- bool multi_image_;
bool is_host_;
std::string android_root_;
std::string no_inline_from_string_;
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index fb19a27..d153459 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -39,6 +39,7 @@
#include "dex/dex_file_loader.h"
#include "dex2oat_environment_test.h"
#include "dex2oat_return_codes.h"
+#include "intern_table-inl.h"
#include "oat.h"
#include "oat_file.h"
#include "profile/profile_compilation_info.h"
@@ -136,14 +137,13 @@
ASSERT_TRUE(success) << error_msg << std::endl << output_;
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -156,14 +156,13 @@
if (!test_accepts_odex_file_on_failure) {
// Verify there's no loadable odex file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() == nullptr);
}
@@ -324,26 +323,26 @@
};
TEST_F(Dex2oatSwapTest, DoNotUseSwapDefaultSingleSmall) {
- RunTest(false /* use_fd */, false /* expect_use */);
- RunTest(true /* use_fd */, false /* expect_use */);
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false);
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false);
}
TEST_F(Dex2oatSwapTest, DoNotUseSwapSingle) {
- RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
- RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-size-threshold=0" });
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-size-threshold=0" });
}
TEST_F(Dex2oatSwapTest, DoNotUseSwapSmall) {
- RunTest(false /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
- RunTest(true /* use_fd */, false /* expect_use */, { "--swap-dex-count-threshold=0" });
+ RunTest(/*use_fd=*/ false, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
+ RunTest(/*use_fd=*/ true, /*expect_use=*/ false, { "--swap-dex-count-threshold=0" });
}
TEST_F(Dex2oatSwapTest, DoUseSwapSingleSmall) {
- RunTest(false /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
- RunTest(true /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ true,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
}
@@ -369,7 +368,7 @@
void GrabResult1() {
if (!kIsTargetBuild) {
native_alloc_1_ = ParseNativeAlloc();
- swap_1_ = ParseSwap(false /* expected */);
+ swap_1_ = ParseSwap(/*expected=*/ false);
} else {
native_alloc_1_ = std::numeric_limits<size_t>::max();
swap_1_ = 0;
@@ -379,7 +378,7 @@
void GrabResult2() {
if (!kIsTargetBuild) {
native_alloc_2_ = ParseNativeAlloc();
- swap_2_ = ParseSwap(true /* expected */);
+ swap_2_ = ParseSwap(/*expected=*/ true);
} else {
native_alloc_2_ = 0;
swap_2_ = std::numeric_limits<size_t>::max();
@@ -449,15 +448,15 @@
// investigate (b/29259363).
TEST_DISABLED_FOR_X86();
- RunTest(false /* use_fd */,
- false /* expect_use */);
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ false);
GrabResult1();
std::string output_1 = output_;
output_ = "";
- RunTest(false /* use_fd */,
- true /* expect_use */,
+ RunTest(/*use_fd=*/ false,
+ /*expect_use=*/ true,
{ "--swap-dex-size-threshold=0", "--swap-dex-count-threshold=0" });
GrabResult2();
std::string output_2 = output_;
@@ -513,14 +512,13 @@
}
// Host/target independent checks.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_GT(app_image_file.length(), 0u);
@@ -662,7 +660,7 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
const ArtDexFileLoader dex_file_loader;
ASSERT_TRUE(dex_file_loader.Open(
- location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+ location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
GenerateProfile(profile_location,
@@ -714,8 +712,8 @@
CompileProfileOdex(dex_location,
odex_location,
app_image_file,
- /* use_fd */ false,
- /* num_profile_classes */ 0);
+ /*use_fd=*/ false,
+ /*num_profile_classes=*/ 0);
CheckValidity();
ASSERT_TRUE(success_);
// Don't check the result since CheckResult relies on the class being in the profile.
@@ -727,8 +725,8 @@
CompileProfileOdex(dex_location,
odex_location,
app_image_file,
- /* use_fd */ false,
- /* num_profile_classes */ 1);
+ /*use_fd=*/ false,
+ /*num_profile_classes=*/ 1);
CheckValidity();
ASSERT_TRUE(success_);
CheckResult(dex_location, odex_location, app_image_file);
@@ -756,8 +754,8 @@
CompileProfileOdex(dex_location,
odex_location,
app_image_file_name,
- /* use_fd */ true,
- /* num_profile_classes */ 1,
+ /*use_fd=*/ true,
+ /*num_profile_classes=*/ 1,
{ input_vdex, output_vdex });
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -768,10 +766,10 @@
CompileProfileOdex(dex_location,
odex_location,
app_image_file_name,
- /* use_fd */ true,
- /* num_profile_classes */ 1,
+ /*use_fd=*/ true,
+ /*num_profile_classes=*/ 1,
{ input_vdex, output_vdex },
- /* expect_success */ true);
+ /*expect_success=*/ true);
EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -784,14 +782,13 @@
const std::string& app_image_file_name) {
// Host/target independent checks.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
@@ -799,7 +796,7 @@
std::vector<std::unique_ptr<const DexFile>> dex_files;
const ArtDexFileLoader dex_file_loader;
ASSERT_TRUE(dex_file_loader.Open(
- location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
+ location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -852,11 +849,11 @@
};
TEST_F(Dex2oatLayoutTest, TestLayout) {
- RunTest(/* app-image */ false);
+ RunTest(/*app_image=*/ false);
}
TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
- RunTest(/* app-image */ true);
+ RunTest(/*app_image=*/ true);
}
TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
@@ -881,8 +878,8 @@
odex_location,
CompilerFilter::kQuicken,
{ input_vdex, output_vdex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
// Unquicken by running the verify compiler filter on the vdex file.
@@ -893,8 +890,8 @@
odex_location,
CompilerFilter::kVerify,
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
CheckResult(dex_location, odex_location);
@@ -922,8 +919,8 @@
odex_location,
CompilerFilter::kQuicken,
{ input_vdex, output_vdex, "--compact-dex-level=fast"},
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
@@ -935,8 +932,8 @@
odex_location2,
CompilerFilter::kVerify,
{ input_vdex, output_vdex, "--compact-dex-level=none"},
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
ASSERT_EQ(vdex_file2->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
@@ -946,14 +943,13 @@
void CheckResult(const std::string& dex_location, const std::string& odex_location) {
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -1326,14 +1322,13 @@
EXPECT_EQ(res, 0);
// Open our generated oat file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex->GetLocation().c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1436,14 +1431,13 @@
{"--compact-dex-level=fast"});
EXPECT_EQ(res, 0);
// Open our generated oat file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1681,14 +1675,13 @@
});
// Open our generated oat file.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename.c_str(),
oat_filename.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
temp_dex.GetFilename().c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1705,7 +1698,7 @@
// Create a multidex file with only one dex that gets rejected for cdex conversion.
ScratchFile apk_file;
{
- FILE* file = fdopen(apk_file.GetFd(), "w+b");
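+ // dup() the descriptor: fclose()-ing the ZipWriter's FILE* closes the fd it wraps,
+ // while the ScratchFile still owns the original.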
+ FILE* file = fdopen(dup(apk_file.GetFd()), "w+b");
ZipWriter writer(file);
// Add vdex to zip.
writer.StartEntry("classes.dex", ZipWriter::kCompress);
@@ -1759,14 +1752,13 @@
{ "--compilation-reason=install" },
true);
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1785,14 +1777,13 @@
{},
true);
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1816,21 +1807,20 @@
{
// Check the vdex doesn't have dex.
std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_location.c_str(),
- /*writable*/ false,
- /*low_4gb*/ false,
- /*unquicken*/ false,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg));
ASSERT_TRUE(vdex != nullptr);
EXPECT_FALSE(vdex->HasDexSection()) << output_;
}
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr) << dex_location;
std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1847,7 +1837,7 @@
std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_location.c_str()));
ASSERT_TRUE(vdex_file != nullptr);
ASSERT_GT(vdex_file->GetLength(), 0u);
- FILE* file = fdopen(dm_file.GetFd(), "w+b");
+ FILE* file = fdopen(dup(dm_file.GetFd()), "w+b");
ZipWriter writer(file);
auto write_all_bytes = [&](File* file) {
std::unique_ptr<uint8_t[]> bytes(new uint8_t[file->GetLength()]);
@@ -1929,8 +1919,8 @@
// Disable cdex since we want to compare against the original dex file
// after unquickening.
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
// Unquicken by running the verify compiler filter on the vdex file and verify it matches.
std::string odex_location2 = GetOdexDir() + "/unquickened.odex";
@@ -1944,8 +1934,8 @@
CompilerFilter::kVerify,
// Disable cdex to avoid needing to write out the shared section.
{ input_vdex, output_vdex, kDisableCompactDex },
- /* expect_success */ true,
- /* use_fd */ true);
+ /*expect_success=*/ true,
+ /*use_fd=*/ true);
}
ASSERT_EQ(vdex_unquickened->Flush(), 0) << "Could not flush and close vdex file";
ASSERT_TRUE(success_);
@@ -1973,7 +1963,7 @@
TEST_F(Dex2oatTest, CompactDexInvalidSource) {
ScratchFile invalid_dex;
{
- FILE* file = fdopen(invalid_dex.GetFd(), "w+b");
+ FILE* file = fdopen(dup(invalid_dex.GetFd()), "w+b");
ZipWriter writer(file);
writer.StartEntry("classes.dex", ZipWriter::kAlign32);
DexFile::Header header = {};
@@ -2015,7 +2005,7 @@
// Create a zip containing the invalid dex.
ScratchFile invalid_dex_zip;
{
- FILE* file = fdopen(invalid_dex_zip.GetFd(), "w+b");
+ FILE* file = fdopen(dup(invalid_dex_zip.GetFd()), "w+b");
ZipWriter writer(file);
writer.StartEntry("classes.dex", ZipWriter::kCompress);
ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
@@ -2062,14 +2052,13 @@
[](const OatFile&) {});
// Open our generated oat file.
std::string error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
odex_location.c_str(),
odex_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
odex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ImageHeader header = {};
@@ -2082,6 +2071,76 @@
EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
}
+TEST_F(Dex2oatTest, AppImageResolveStrings) {
+ if (!ClassLinker::kAppImageMayContainStrings) {
+ TEST_DISABLED();
+ }
+ using Hotness = ProfileCompilationInfo::MethodHotness;
+ // Create a profile with the startup method marked.
+ ScratchFile profile_file;
+ std::vector<uint16_t> methods;
+ {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("StringLiterals"));
+ for (size_t method_idx = 0; method_idx < dex->NumMethodIds(); ++method_idx) {
+ if (std::string(dex->GetMethodName(dex->GetMethodId(method_idx))) == "startUpMethod") {
+ methods.push_back(method_idx);
+ }
+ }
+ ASSERT_GT(methods.size(), 0u);
+ // Here, we build the profile from the method lists.
+ ProfileCompilationInfo info;
+ info.AddMethodsForDex(Hotness::kFlagStartup, dex.get(), methods.begin(), methods.end());
+ // Save the profile since we want to use it with dex2oat to produce an oat file.
+ ASSERT_TRUE(info.Save(profile_file.GetFd()));
+ }
+ const std::string out_dir = GetScratchDir();
+ const std::string odex_location = out_dir + "/base.odex";
+ const std::string app_image_location = out_dir + "/base.art";
+ GenerateOdexForTest(GetTestDexFileName("StringLiterals"),
+ odex_location,
+ CompilerFilter::Filter::kSpeedProfile,
+ { "--app-image-file=" + app_image_location,
+ "--resolve-startup-const-strings=true",
+ "--profile-file=" + profile_file.GetFilename()},
+ /*expect_success=*/ true,
+ /*use_fd=*/ false,
+ [](const OatFile&) {});
+ // Open our generated oat file.
+ std::string error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+ odex_location.c_str(),
+ odex_location.c_str(),
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ odex_location.c_str(),
+ /*reservation=*/ nullptr,
+ &error_msg));
+ ASSERT_TRUE(odex_file != nullptr);
+ // Check that the app image intern table contains only the "startup" strings.
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<gc::space::ImageSpace> space =
+ gc::space::ImageSpace::CreateFromAppImage(app_image_location.c_str(),
+ odex_file.get(),
+ &error_msg);
+ ASSERT_TRUE(space != nullptr) << error_msg;
+ std::set<std::string> seen;
+ InternTable intern_table;
+ intern_table.AddImageStringsToTable(space.get(), [&](InternTable::UnorderedSet& interns)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (const GcRoot<mirror::String>& str : interns) {
+ seen.insert(str.Read()->ToModifiedUtf8());
+ }
+ });
+ EXPECT_TRUE(seen.find("Loading ") != seen.end());
+ EXPECT_TRUE(seen.find("Starting up") != seen.end());
+ EXPECT_TRUE(seen.find("abcd.apk") != seen.end());
+ EXPECT_TRUE(seen.find("Unexpected error") == seen.end());
+ EXPECT_TRUE(seen.find("Shutting down!") == seen.end());
+ }
+}
+
+
TEST_F(Dex2oatClassLoaderContextTest, StoredClassLoaderContext) {
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
const std::string out_dir = GetScratchDir();
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 852293b..9a7f93d 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -68,7 +68,7 @@
debug_info_(debug_info) {
}
- void Run(Thread*) {
+ void Run(Thread*) override {
result_ = debug::MakeMiniDebugInfo(isa_,
instruction_set_features_,
text_section_address_,
diff --git a/dex2oat/linker/elf_writer_test.cc b/dex2oat/linker/elf_writer_test.cc
index 40495f3..1d578ab 100644
--- a/dex2oat/linker/elf_writer_test.cc
+++ b/dex2oat/linker/elf_writer_test.cc
@@ -34,7 +34,7 @@
class ElfWriterTest : public CommonCompilerTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
ReserveImageSpace();
CommonCompilerTest::SetUp();
}
@@ -164,7 +164,7 @@
// Patch manually.
std::vector<uint8_t> expected = initial_data;
for (uintptr_t location : patch_locations) {
- typedef __attribute__((__aligned__(1))) uint32_t UnalignedAddress;
+ using UnalignedAddress __attribute__((__aligned__(1))) = uint32_t;
*reinterpret_cast<UnalignedAddress*>(expected.data() + location) += delta;
}
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index 96c48b8..b628c9e 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -79,8 +79,6 @@
/*boot_oat_begin*/0U,
/*boot_oat_size_*/0U,
sizeof(void*),
- /*compile_pic*/false,
- /*is_pic*/false,
ImageHeader::kDefaultStorageMode,
/*data_size*/0u);
ASSERT_TRUE(image_header.IsValid());
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index d575420..443ee52 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -214,7 +214,6 @@
// TODO: compile_pic should be a test argument.
std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_options_,
kRequestedImageBase,
- /*compile_pic*/false,
/*compile_app_image*/false,
storage_mode,
oat_filename_vector,
@@ -340,7 +339,8 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u);
+ bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
+ /* image_file_location_oat_checksum */ 0u);
ASSERT_TRUE(header_ok);
writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 6a13454..6410c7a 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -53,6 +53,7 @@
#include "handle_scope-inl.h"
#include "image.h"
#include "imt_conflict_table.h"
+#include "intern_table-inl.h"
#include "jni/jni_internal.h"
#include "linear_alloc.h"
#include "lock_word.h"
@@ -200,41 +201,127 @@
bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
target_ptr_size_ = InstructionSetPointerSize(compiler_options_.GetInstructionSet());
+
+ Thread* const self = Thread::Current();
+
gc::Heap* const heap = Runtime::Current()->GetHeap();
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
{
TimingLogger::ScopedTiming t("PruneNonImageClasses", timings);
PruneNonImageClasses(); // Remove junk
}
+
if (compile_app_image_) {
TimingLogger::ScopedTiming t("ClearDexFileCookies", timings);
// Clear dex file cookies for app images to enable app image determinism. This is required
// since the cookie field contains long pointers to DexFiles which are not deterministic.
// b/34090128
ClearDexFileCookies();
- } else {
- TimingLogger::ScopedTiming t("ComputeLazyFieldsForImageClasses", timings);
- // Avoid for app image since this may increase RAM and image size.
- ComputeLazyFieldsForImageClasses(); // Add useful information
}
}
+
{
TimingLogger::ScopedTiming t("CollectGarbage", timings);
heap->CollectGarbage(/* clear_soft_references */ false); // Remove garbage.
}
if (kIsDebugBuild) {
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
CheckNonImageClassesRemoved();
}
+ // Stores the information needed later to compute image offsets for string
+ // references in the app image.
+ std::vector<RefInfoPair> string_ref_info;
+ if (ClassLinker::kAppImageMayContainStrings && compile_app_image_) {
+ // Count the number of string fields so we can allocate the appropriate
+ // amount of space in the image section.
+ TimingLogger::ScopedTiming t("AppImage:CollectStringReferenceInfo", timings);
+ ScopedObjectAccess soa(self);
+
+ if (kIsDebugBuild) {
+ VerifyNativeGCRootInvariants();
+ CHECK_EQ(image_infos_.size(), 1u);
+ }
+
+ string_ref_info = CollectStringReferenceInfo();
+ image_infos_.back().num_string_references_ = string_ref_info.size();
+ }
+
{
TimingLogger::ScopedTiming t("CalculateNewObjectOffsets", timings);
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
CalculateNewObjectOffsets();
}
+ // Obtain class count for debugging purposes
+ if (VLOG_IS_ON(compiler) && compile_app_image_) {
+ ScopedObjectAccess soa(self);
+
+ size_t app_image_class_count = 0;
+
+ for (ImageInfo& info : image_infos_) {
+ info.class_table_->Visit([&](ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!IsInBootImage(klass.Ptr())) {
+ ++app_image_class_count;
+ }
+
+ // Indicate that we would like to continue visiting classes.
+ return true;
+ });
+ }
+
+ VLOG(compiler) << "Dex2Oat:AppImage:classCount = " << app_image_class_count;
+ }
+
+ if (ClassLinker::kAppImageMayContainStrings && compile_app_image_) {
+ // Use the string reference information obtained earlier to calculate image
+ // offsets. These will later be written to the image by Write/CopyMetadata.
+ TimingLogger::ScopedTiming t("AppImage:CalculateImageOffsets", timings);
+ ScopedObjectAccess soa(self);
+
+ size_t managed_string_refs = 0,
+ native_string_refs = 0;
+
+ /*
+ * Iterate over the string reference info and calculate image offsets.
+ * The first element of the pair is the object the reference belongs to
+ * and the second element is the offset to the field. If the offset has
+ * a native ref tag 1) the object is a DexCache and 2) the offset needs
+ * to be calculated using the relocation information for the DexCache's
+ * strings array.
+ */
+ for (const RefInfoPair& ref_info : string_ref_info) {
+ uint32_t image_offset;
+
+ if (HasNativeRefTag(ref_info.second)) {
+ ++native_string_refs;
+
+ // Only DexCaches can contain native references to Java strings.
+ ObjPtr<mirror::DexCache> dex_cache(ref_info.first->AsDexCache());
+
+ // No need to set or clear native ref tags. The existing tag will be
+ // carried forward.
+ image_offset = native_object_relocations_[dex_cache->GetStrings()].offset +
+ ref_info.second;
+ } else {
+ ++managed_string_refs;
+ image_offset = GetImageOffset(ref_info.first) + ref_info.second;
+ }
+
+ string_reference_offsets_.push_back(image_offset);
+ }
+
+ CHECK_EQ(image_infos_.back().num_string_references_,
+ string_reference_offsets_.size());
+
+ VLOG(compiler) << "Dex2Oat:AppImage:stringReferences = " << string_reference_offsets_.size();
+ VLOG(compiler) << "Dex2Oat:AppImage:managedStringReferences = " << managed_string_refs;
+ VLOG(compiler) << "Dex2Oat:AppImage:nativeStringReferences = " << native_string_refs;
+ }
+
// This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
// bin size sums being calculated.
TimingLogger::ScopedTiming t("AllocMemory", timings);
@@ -245,6 +332,252 @@
return true;
}
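+// Visitor that collects, for each object outside the boot image, the managed fields
+// and DexCache native slots referencing Strings eligible for the app image, so that
+// their image offsets can be computed later.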
+class ImageWriter::CollectStringReferenceVisitor {
+ public:
+ explicit CollectStringReferenceVisitor(const ImageWriter& image_writer)
+ : dex_cache_string_ref_counter_(0),
+ image_writer_(image_writer) {}
+
+ // Used to prevent repeated null checks in the code that calls the visitor.
+ ALWAYS_INLINE
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ /*
+ * Counts the number of native references to strings reachable through
+ * DexCache objects for verification later.
+ */
+ ALWAYS_INLINE
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
+
+ if (curr_obj_->IsDexCache() &&
+ image_writer_.IsValidAppImageStringReference(referred_obj)) {
+ ++dex_cache_string_ref_counter_;
+ }
+ }
+
+ // Collects info for Java fields that reference Java Strings.
+ ALWAYS_INLINE
+ void operator() (ObjPtr<mirror::Object> obj,
+ MemberOffset member_offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Object> referred_obj =
+ obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+ member_offset);
+
+ if (image_writer_.IsValidAppImageStringReference(referred_obj)) {
+ string_ref_info_.emplace_back(obj.Ptr(), member_offset.Uint32Value());
+ }
+ }
+
+ ALWAYS_INLINE
+ void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
+ }
+
+ // Used by the wrapper function to obtain a native reference count.
+ size_t GetDexCacheStringRefCounter() const {
+ return dex_cache_string_ref_counter_;
+ }
+
+ // Resets the native reference count.
+ void ResetDexCacheStringRefCounter() {
+ dex_cache_string_ref_counter_ = 0;
+ }
+
+ ObjPtr<mirror::Object> curr_obj_;
+ mutable std::vector<RefInfoPair> string_ref_info_;
+
+ private:
+ mutable size_t dex_cache_string_ref_counter_;
+ const ImageWriter& image_writer_;
+};
+
+std::vector<ImageWriter::RefInfoPair> ImageWriter::CollectStringReferenceInfo() const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ CollectStringReferenceVisitor visitor(*this);
+
+ heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!IsInBootImage(object.Ptr())) {
+ // Many native GC roots are wrapped in std::atomics. Due to the
+ // semantics of atomic objects we can't actually visit the addresses of
+ // the native GC roots. Instead the visiting functions will pass the
+ // visitor the address of a temporary copy of the native GC root and, if
+ // it is changed, copy it back into the original location.
+ //
+ // This analysis requires the actual address of the native GC root so
+ // we will only count them in the visitor and then collect them manually
+ // afterwards. This count will then be used to verify that we collected
+ // all native GC roots.
+ visitor.curr_obj_ = object;
+ if (object->IsDexCache()) {
+ object->VisitReferences</* kVisitNativeRoots */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+
+ ObjPtr<mirror::DexCache> dex_cache = object->AsDexCache();
+ size_t new_native_ref_counter = 0;
+
+ for (size_t string_index = 0; string_index < dex_cache->NumStrings(); ++string_index) {
+ mirror::StringDexCachePair dc_pair = dex_cache->GetStrings()[string_index].load();
+ mirror::Object* referred_obj = dc_pair.object.AddressWithoutBarrier()->AsMirrorPtr();
+
+ if (IsValidAppImageStringReference(referred_obj)) {
+ ++new_native_ref_counter;
+
+ uint32_t string_vector_offset =
+ (string_index * sizeof(mirror::StringDexCachePair)) +
+ offsetof(mirror::StringDexCachePair, object);
+
+ visitor.string_ref_info_.emplace_back(object.Ptr(),
+ SetNativeRefTag(string_vector_offset));
+ }
+ }
+
+ CHECK_EQ(visitor.GetDexCacheStringRefCounter(), new_native_ref_counter);
+ } else {
+ object->VisitReferences</* kVisitNativeRoots */ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+ }
+
+ visitor.ResetDexCacheStringRefCounter();
+ }
+ });
+
+ return std::move(visitor.string_ref_info_);
+}
+
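+// Debug-build visitor used by VerifyNativeGCRootInvariants() to check that native GC
+// roots referencing Strings occur only in DexCache objects, never in Class or
+// ClassLoader objects.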
+class ImageWriter::NativeGCRootInvariantVisitor {
+ public:
+ explicit NativeGCRootInvariantVisitor(const ImageWriter& image_writer) :
+ image_writer_(image_writer) {}
+
+ ALWAYS_INLINE
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ ALWAYS_INLINE
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
+
+ if (curr_obj_->IsClass()) {
+ class_violation = class_violation ||
+ image_writer_.IsValidAppImageStringReference(referred_obj);
+
+ } else if (curr_obj_->IsClassLoader()) {
+ class_loader_violation = class_loader_violation ||
+ image_writer_.IsValidAppImageStringReference(referred_obj);
+
+ } else if (!curr_obj_->IsDexCache()) {
+ LOG(FATAL) << "Dex2Oat:AppImage | " <<
+ "Native reference to String found in unexpected object type.";
+ }
+ }
+
+ ALWAYS_INLINE
+ void operator() (ObjPtr<mirror::Object> obj ATTRIBUTE_UNUSED,
+ MemberOffset member_offset ATTRIBUTE_UNUSED,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {}
+
+ ALWAYS_INLINE
+ void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {}
+
+ // Returns true iff the only reachable native string references are through DexCache objects.
+ bool InvariantsHold() const {
+ return !(class_violation || class_loader_violation);
+ }
+
+ ObjPtr<mirror::Object> curr_obj_;
+ mutable bool class_violation = false,
+ class_loader_violation = false;
+
+ private:
+ const ImageWriter& image_writer_;
+};
+
+void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+
+ NativeGCRootInvariantVisitor visitor(*this);
+
+ heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ visitor.curr_obj_ = object;
+
+ if (!IsInBootImage(object.Ptr())) {
+ object->VisitReferences</* kVisitNativeRoots */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+ }
+ });
+
+ bool error = false;
+ std::ostringstream error_str;
+
+ /*
+ * Build the error string
+ */
+
+ if (UNLIKELY(visitor.class_violation)) {
+ error_str << "Class";
+ error = true;
+ }
+
+ if (UNLIKELY(visitor.class_loader_violation)) {
+ if (error) {
+ error_str << ", ";
+ }
+
+ error_str << "ClassLoader";
+ }
+
+ CHECK(visitor.InvariantsHold()) <<
+ "Native GC root invariant failure. String refs reachable through the following objects: " <<
+ error_str.str();
+}
+
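+// Copies the computed string reference offsets into the image's
+// kSectionStringReferenceOffsets section.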
+void ImageWriter::CopyMetadata() {
+ CHECK(compile_app_image_);
+ CHECK_EQ(image_infos_.size(), 1u);
+
+ const ImageInfo& image_info = image_infos_.back();
+ std::vector<ImageSection> image_sections = image_info.CreateImageSections().second;
+
+ uint32_t* sfo_section_base = reinterpret_cast<uint32_t*>(
+ image_info.image_.Begin() +
+ image_sections[ImageHeader::kSectionStringReferenceOffsets].Offset());
+
+ std::copy(string_reference_offsets_.begin(),
+ string_reference_offsets_.end(),
+ sfo_section_base);
+}
+
+bool ImageWriter::IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const {
+ return referred_obj != nullptr &&
+ !IsInBootImage(referred_obj.Ptr()) &&
+ referred_obj->IsString();
+}
+
bool ImageWriter::Write(int image_fd,
const std::vector<const char*>& image_filenames,
const std::vector<const char*>& oat_filenames) {
@@ -272,6 +605,10 @@
CopyAndFixupObjects();
}
+ if (compile_app_image_) {
+ CopyMetadata();
+ }
+
for (size_t i = 0; i < image_filenames.size(); ++i) {
const char* image_filename = image_filenames[i];
ImageInfo& image_info = GetImageInfo(i);
@@ -370,8 +707,23 @@
return false;
}
+ if (VLOG_IS_ON(compiler)) {
+ size_t separately_written_section_size = bitmap_section.Size() +
+ image_header->GetImageRelocationsSection().Size() +
+ sizeof(ImageHeader);
+
+ size_t total_uncompressed_size = raw_image_data.size() + separately_written_section_size,
+ total_compressed_size = image_data.size() + separately_written_section_size;
+
+ VLOG(compiler) << "Dex2Oat:uncompressedImageSize = " << total_uncompressed_size;
+ if (total_uncompressed_size != total_compressed_size) {
+ VLOG(compiler) << "Dex2Oat:compressedImageSize = " << total_compressed_size;
+ }
+ }
+
CHECK_EQ(relocations_position_in_file + relocations.size(),
static_cast<size_t>(image_file->GetLength()));
+
if (image_file->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
return false;
@@ -725,9 +1077,7 @@
bool ImageWriter::AllocMemory() {
for (ImageInfo& image_info : image_infos_) {
- ImageSection unused_sections[ImageHeader::kSectionCount];
- const size_t length = RoundUp(
- image_info.CreateImageSections(unused_sections), kPageSize);
+ const size_t length = RoundUp(image_info.CreateImageSections().first, kPageSize);
std::string error_msg;
image_info.image_ = MemMap::MapAnonymous("image writer image",
@@ -753,21 +1103,6 @@
return true;
}
-class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
- public:
- bool operator()(ObjPtr<Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
- StackHandleScope<1> hs(Thread::Current());
- mirror::Class::ComputeName(hs.NewHandle(c));
- return true;
- }
-};
-
-void ImageWriter::ComputeLazyFieldsForImageClasses() {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ComputeLazyFieldsForClassesVisitor visitor;
- class_linker->VisitClassesWithoutClassesLock(&visitor);
-}
-
static bool IsBootClassLoaderClass(ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
return klass->GetClassLoader() == nullptr;
@@ -1798,9 +2133,9 @@
}
ProcessWorkStack(&work_stack);
- // For app images, there may be objects that are only held live by the by the boot image. One
+ // For app images, there may be objects that are only held live by the boot image. One
// example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
- // does not fail any checks. TODO: We should probably avoid copying these objects.
+ // does not fail any checks.
if (compile_app_image_) {
for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
DCHECK(space->IsImageSpace());
@@ -1892,9 +2227,7 @@
for (ImageInfo& image_info : image_infos_) {
image_info.image_begin_ = global_image_begin_ + image_offset;
image_info.image_offset_ = image_offset;
- ImageSection unused_sections[ImageHeader::kSectionCount];
- image_info.image_size_ =
- RoundUp(image_info.CreateImageSections(unused_sections), kPageSize);
+ image_info.image_size_ = RoundUp(image_info.CreateImageSections().first, kPageSize);
// There should be no gaps until the next image.
image_offset += image_info.image_size_;
}
@@ -1928,58 +2261,94 @@
boot_image_live_objects_ = boot_image_live_objects.Get();
}
-size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
- DCHECK(out_sections != nullptr);
+std::pair<size_t, std::vector<ImageSection>> ImageWriter::ImageInfo::CreateImageSections() const {
+ std::vector<ImageSection> sections(ImageHeader::kSectionCount);
- // Do not round up any sections here that are represented by the bins since it will break
- // offsets.
+ // Do not round up any sections here that are represented by the bins since it
+ // will break offsets.
- // Objects section
- ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects];
- *objects_section = ImageSection(0u, image_end_);
+ /*
+ * Objects section
+ */
+ sections[ImageHeader::kSectionObjects] =
+ ImageSection(0u, image_end_);
- // Add field section.
- ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields];
- *field_section = ImageSection(GetBinSlotOffset(Bin::kArtField), GetBinSlotSize(Bin::kArtField));
+ /*
+ * Field section
+ */
+ sections[ImageHeader::kSectionArtFields] =
+ ImageSection(GetBinSlotOffset(Bin::kArtField), GetBinSlotSize(Bin::kArtField));
- // Add method section.
- ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods];
- *methods_section = ImageSection(
- GetBinSlotOffset(Bin::kArtMethodClean),
- GetBinSlotSize(Bin::kArtMethodClean) + GetBinSlotSize(Bin::kArtMethodDirty));
+ /*
+ * Method section
+ */
+ sections[ImageHeader::kSectionArtMethods] =
+ ImageSection(GetBinSlotOffset(Bin::kArtMethodClean),
+ GetBinSlotSize(Bin::kArtMethodClean) +
+ GetBinSlotSize(Bin::kArtMethodDirty));
- // IMT section.
- ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables];
- *imt_section = ImageSection(GetBinSlotOffset(Bin::kImTable), GetBinSlotSize(Bin::kImTable));
+ /*
+ * IMT section
+ */
+ sections[ImageHeader::kSectionImTables] =
+ ImageSection(GetBinSlotOffset(Bin::kImTable), GetBinSlotSize(Bin::kImTable));
- // Conflict tables section.
- ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
- *imt_conflict_tables_section = ImageSection(GetBinSlotOffset(Bin::kIMTConflictTable),
- GetBinSlotSize(Bin::kIMTConflictTable));
+ /*
+ * Conflict Tables section
+ */
+ sections[ImageHeader::kSectionIMTConflictTables] =
+ ImageSection(GetBinSlotOffset(Bin::kIMTConflictTable), GetBinSlotSize(Bin::kIMTConflictTable));
- // Runtime methods section.
- ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
- *runtime_methods_section = ImageSection(GetBinSlotOffset(Bin::kRuntimeMethod),
- GetBinSlotSize(Bin::kRuntimeMethod));
+ /*
+ * Runtime Methods section
+ */
+ sections[ImageHeader::kSectionRuntimeMethods] =
+ ImageSection(GetBinSlotOffset(Bin::kRuntimeMethod), GetBinSlotSize(Bin::kRuntimeMethod));
- // Add dex cache arrays section.
- ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
- *dex_cache_arrays_section = ImageSection(GetBinSlotOffset(Bin::kDexCacheArray),
- GetBinSlotSize(Bin::kDexCacheArray));
+ /*
+ * DexCache Arrays section.
+ */
+ const ImageSection& dex_cache_arrays_section =
+ sections[ImageHeader::kSectionDexCacheArrays] =
+ ImageSection(GetBinSlotOffset(Bin::kDexCacheArray),
+ GetBinSlotSize(Bin::kDexCacheArray));
+
+ /*
+ * Interned Strings section
+ */
+
// Round up to the alignment the string table expects. See HashSet::WriteToMemory.
- size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
- // Calculate the size of the interned strings.
- ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
- *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
- cur_pos = interned_strings_section->End();
- // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
- cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
- // Calculate the size of the class table section.
- ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
- *class_table_section = ImageSection(cur_pos, class_table_bytes_);
- cur_pos = class_table_section->End();
- // Image end goes right before the start of the image bitmap.
- return cur_pos;
+ size_t cur_pos = RoundUp(dex_cache_arrays_section.End(), sizeof(uint64_t));
+
+ const ImageSection& interned_strings_section =
+ sections[ImageHeader::kSectionInternedStrings] =
+ ImageSection(cur_pos, intern_table_bytes_);
+
+ /*
+ * Class Table section
+ */
+
+ // Obtain the new position and round it up to the appropriate alignment.
+ cur_pos = RoundUp(interned_strings_section.End(), sizeof(uint64_t));
+
+ const ImageSection& class_table_section =
+ sections[ImageHeader::kSectionClassTable] =
+ ImageSection(cur_pos, class_table_bytes_);
+
+ /*
+   * String Reference Offsets section
+ */
+
+ // Round up to the alignment of the offsets we are going to store.
+ cur_pos = RoundUp(class_table_section.End(), sizeof(uint32_t));
+
+ const ImageSection& string_reference_offsets =
+ sections[ImageHeader::kSectionStringReferenceOffsets] =
+ ImageSection(cur_pos, sizeof(uint32_t) * num_string_references_);
+
+ // Return the number of bytes described by these sections, and the sections
+ // themselves.
+  return std::make_pair(string_reference_offsets.End(), std::move(sections));
}
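The layout above chains the variable-sized tables together by rounding the running position up to the alignment each table expects. A minimal standalone sketch of that chaining arithmetic, with made-up sizes:

#include <cstddef>
#include <cstdint>

// Round `value` up to the next multiple of `alignment` (a power of two),
// mirroring the RoundUp() calls used when laying out the image sections.
constexpr size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Example: if the DexCache arrays end at byte 1001, an 8-byte-aligned table
// (interned strings / class table) starts at 1008; a 4-byte-aligned table of
// string-reference offsets placed after 20 bytes of class table starts at 1028.
static_assert(RoundUpTo(1001, sizeof(uint64_t)) == 1008, "8-byte alignment");
static_assert(RoundUpTo(1008 + 20, sizeof(uint32_t)) == 1028, "4-byte alignment");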
void ImageWriter::CreateHeader(size_t oat_index) {
@@ -1989,8 +2358,9 @@
const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
// Create the image sections.
- ImageSection sections[ImageHeader::kSectionCount];
- const size_t image_end = image_info.CreateImageSections(sections);
+ auto section_info_pair = image_info.CreateImageSections();
+ const size_t image_end = section_info_pair.first;
+ std::vector<ImageSection>& sections = section_info_pair.second;
// Finally bitmap section.
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
@@ -2027,7 +2397,7 @@
ImageHeader* header = new (image_info.image_.Begin()) ImageHeader(
PointerToLowMemUInt32(image_info.image_begin_),
image_end,
- sections,
+ sections.data(),
image_info.image_roots_address_,
image_info.oat_checksum_,
PointerToLowMemUInt32(oat_file_begin),
@@ -2039,8 +2409,6 @@
boot_oat_begin,
boot_oat_end - boot_oat_begin,
static_cast<uint32_t>(target_ptr_size_),
- compile_pic_,
- /*is_pic*/compile_app_image_,
image_storage_mode_,
/*data_size*/0u);
@@ -2243,7 +2611,7 @@
// the VisitRoots() will update the memory directly rather than the copies.
// This also relies on visit roots not doing any verification which could fail after we update
// the roots to be the image addresses.
- temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+ temp_intern_table.AddTableFromMemory(intern_table_memory_ptr, VoidFunctor());
CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
// Record relocations. (The root visitor does not get to see the slot addresses.)
@@ -2970,7 +3338,6 @@
ImageWriter::ImageWriter(
const CompilerOptions& compiler_options,
uintptr_t image_begin,
- bool compile_pic,
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
@@ -2979,7 +3346,6 @@
: compiler_options_(compiler_options),
global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
image_objects_offset_begin_(0),
- compile_pic_(compile_pic),
compile_app_image_(compile_app_image),
target_ptr_size_(InstructionSetPointerSize(compiler_options.GetInstructionSet())),
image_infos_(oat_filenames.size()),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index e45023e..93e4be5 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -77,13 +77,28 @@
public:
ImageWriter(const CompilerOptions& compiler_options,
uintptr_t image_begin,
- bool compile_pic,
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
const HashSet<std::string>* dirty_image_objects);
+ /*
+ * Modifies the heap and collects information about objects and code so that
+ * they can be written to the boot or app image later.
+ *
+ * First, unneeded classes are removed from the managed heap. Next, we
+ * remove cached values and calculate necessary metadata for later in the
+ * process. Optionally some debugging information is collected and used to
+ * verify the state of the heap at this point. Next, metadata from earlier
+ * is used to calculate offsets of references to strings to speed up string
+ * interning when the image is loaded. Lastly, we allocate enough memory to
+ * fit all image data minus the bitmap and relocation sections.
+ *
+ * This function should only be called when all objects to be included in the
+ * image have been initialized and all native methods have been generated. In
+ * addition, no other thread should be modifying the heap.
+ */
bool PrepareImageAddressSpace(TimingLogger* timings);
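For context, a caller is expected to drive the writer roughly in the order sketched below. This is a hedged illustration built only from the signatures visible in this header; the argument values (storage mode, file descriptors, the timing logger) are placeholders rather than what dex2oat actually passes.

// Illustrative call order only; values are placeholders, not dex2oat's.
bool WriteAppImage(const CompilerOptions& compiler_options,
                   const std::vector<const char*>& image_filenames,
                   const std::vector<const char*>& oat_filenames,
                   const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map) {
  TimingLogger timings("image writer", /*precise=*/ false, /*verbose=*/ false);
  ImageWriter writer(compiler_options,
                     /*image_begin=*/ 0u,
                     /*compile_app_image=*/ true,
                     ImageHeader::kStorageModeUncompressed,
                     oat_filenames,
                     dex_file_oat_index_map,
                     /*dirty_image_objects=*/ nullptr);
  if (!writer.PrepareImageAddressSpace(&timings)) {
    return false;
  }
  // ... emit the oat files, then:
  return writer.Write(/*image_fd=*/ -1, image_filenames, oat_filenames);
}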
bool IsImageAddressSpaceReady() const {
@@ -270,9 +285,18 @@
ImageInfo();
ImageInfo(ImageInfo&&) = default;
- // Create the image sections into the out sections variable, returns the size of the image
- // excluding the bitmap.
- size_t CreateImageSections(ImageSection* out_sections) const;
+ /*
+ * Creates ImageSection objects that describe most of the sections of a
+ * boot or AppImage. The following sections are not included:
+ * - ImageHeader::kSectionImageBitmap
+ * - ImageHeader::kSectionImageRelocations
+ *
+ * In addition, the ImageHeader is not covered here.
+ *
+ * This function will return the total size of the covered sections as well
+ * as a vector containing the individual ImageSection objects.
+ */
+ std::pair<size_t, std::vector<ImageSection>> CreateImageSections() const;
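Callers consume the returned pair as shown in CreateHeader() in the .cc; a condensed usage note:

// Usage sketch mirroring CreateHeader(): total size plus per-section data.
auto section_info = image_info.CreateImageSections();
const size_t image_end = section_info.first;
const std::vector<ImageSection>& sections = section_info.second;
// e.g. sections[ImageHeader::kSectionInternedStrings].Offset() and .Size().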
size_t GetStubOffset(StubType stub_type) const {
DCHECK_LT(static_cast<size_t>(stub_type), kNumberOfStubTypes);
@@ -364,6 +388,10 @@
// Number of pointer fixup bytes.
size_t pointer_fixup_bytes_ = 0;
+ // Number of offsets to string references that will be written to the
+    // StringReferenceOffsets section.
+ size_t num_string_references_ = 0;
+
// Intern table associated with this image for serialization.
std::unique_ptr<InternTable> intern_table_;
@@ -429,10 +457,6 @@
// Debug aid that list of requested image classes.
void DumpImageClasses();
- // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
- void ComputeLazyFieldsForImageClasses()
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Visit all class loaders.
void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -478,6 +502,21 @@
ImtConflictTable* copy,
size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
+
+ /*
+ * Copies metadata from the heap into a buffer that will be compressed and
+ * written to the image.
+ *
+ * This function copies the string offset metadata from a local vector to an
+ * offset inside the image_ field of an ImageInfo struct. The offset into the
+ * memory pointed to by the image_ field is obtained from the ImageSection
+ * object for the String Offsets section.
+ *
+ * All data for the image, besides the object bitmap and the relocation data,
+ * will also be copied into the memory region pointed to by image_.
+ */
+ void CopyMetadata();
+
template <bool kCheckNotNull = true>
void RecordImageRelocation(const void* dest, size_t oat_index, bool app_to_boot_image = false);
void FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index)
@@ -558,6 +597,60 @@
std::unordered_set<mirror::Object*>* visited)
REQUIRES_SHARED(Locks::mutator_lock_);
+ /*
+ * A pair containing the information necessary to calculate the position of a
+ * managed object's field or native reference inside an AppImage.
+ *
+ * The first element of this pair is a raw mirror::Object pointer because its
+ * usage will cross a suspend point and ObjPtr would produce a false positive.
+ *
+ * The second element is an offset either into the object or into the string
+ * array of a DexCache object.
+ *
+ * TODO (chriswailes): Add a note indicating the source line where we ensure
+ * that no moving garbage collection will occur.
+ */
+ typedef std::pair<mirror::Object*, uint32_t> RefInfoPair;
+
+ /*
+   * Collects the info necessary for calculating image offsets to string
+   * fields later.
+ *
+   * This function is used when constructing AppImages. Because AppImages
+   * contain strings that must be interned, we need to visit references to
+   * these strings when the AppImage is loaded and either insert them into the
+   * runtime intern table or replace each existing reference with a reference
+   * to the already-interned string.
+ *
+ * To speed up the interning of strings when the AppImage is loaded we include
+ * a list of offsets to string references in the AppImage. These are then
+ * iterated over at load time and fixed up.
+ *
+   * To record the offsets we first have to count the number of string
+   * references that will be included in the AppImage. This allows us both to
+   * allocate enough memory for storing the offsets and to correctly calculate
+   * the offsets of various objects within the image. Once the image offset
+   * calculations are done for Java objects, the reference object/offset pairs
+   * are translated to image offsets. The CopyMetadata function then copies
+   * these offsets into the image.
+ *
+   * Returns a vector containing pairs of object pointers and offsets. Each
+   * offset is tagged to indicate whether it refers to a field of a mirror
+   * object or to a native reference. If the offset is tagged as a native
+   * reference it must have come from a DexCache's string array.
+ */
+ std::vector<RefInfoPair> CollectStringReferenceInfo() const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
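A hedged sketch of the shape such a collection pass can take, modeled on the VisitObjects()/VisitReferences() walk that VerifyNativeGCRootInvariants() performs in the .cc above. ForEachReferenceField is an assumed helper standing in for the real field visitor, and the tagging of native (DexCache) offsets is omitted.

// Simplified sketch (inside ImageWriter): record (object, offset) pairs for
// every reference field that points at an AppImage string.
std::vector<RefInfoPair> string_refs;
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->VisitObjects([&](ObjPtr<mirror::Object> object)
                       REQUIRES_SHARED(Locks::mutator_lock_) {
  if (IsInBootImage(object.Ptr())) {
    return;  // Boot-image objects are fixed up by the boot image itself.
  }
  // Assumed helper: invokes the callback once per reference field.
  ForEachReferenceField(object, [&](MemberOffset offset,
                                    ObjPtr<mirror::Object> referred) {
    if (IsValidAppImageStringReference(referred)) {
      string_refs.emplace_back(object.Ptr(), offset.Uint32Value());
    }
  });
});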
+ /*
+ * Ensures that assumptions about native GC roots and AppImages hold.
+ *
+ * This function verifies the following condition(s):
+ * - Native references to Java strings are only reachable through DexCache
+ * objects
+ */
+ void VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+
bool IsMultiImage() const {
return image_infos_.size() > 1;
}
@@ -632,6 +725,18 @@
void CopyAndFixupPointer(void* object, MemberOffset offset, void* value, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
+ /*
+   * Tests whether a reference refers to a string that will be contained in the AppImage.
+   *
+   * An object reference is considered to be an AppImage String reference iff:
+ * - It isn't null
+ * - The referred-object isn't in the boot image
+ * - The referred-object is a Java String
+ */
+ ALWAYS_INLINE
+ bool IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
const CompilerOptions& compiler_options_;
// Beginning target image address for the first image.
@@ -652,7 +757,6 @@
std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
// Boolean flags.
- const bool compile_pic_;
const bool compile_app_image_;
// Size of pointers on the target architecture.
@@ -684,6 +788,9 @@
// Boot image live objects, null for app image.
mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
+ // Offsets into the image that indicate where string references are recorded.
+ std::vector<uint32_t> string_reference_offsets_;
+
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
@@ -704,9 +811,23 @@
class NativeLocationVisitor;
class PruneClassesVisitor;
class PruneClassLoaderClassesVisitor;
+ class PruneObjectReferenceVisitor;
class RegisterBootClassPathClassesVisitor;
class VisitReferencesVisitor;
- class PruneObjectReferenceVisitor;
+
+ /*
+ * A visitor class for extracting object/offset pairs.
+ *
+ * This visitor walks the fields of an object and extracts object/offset pairs
+ * that are later translated to image offsets. This visitor is only
+ * responsible for extracting info for Java references. Native references to
+ * Java strings are handled in the wrapper function
+ * CollectStringReferenceInfo().
+ */
+ class CollectStringReferenceVisitor;
+
+ // A visitor used by the VerifyNativeGCRootInvariants() function.
+ class NativeGCRootInvariantVisitor;
DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index a5831b6..2610561 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -96,12 +96,12 @@
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch ATTRIBUTE_UNUSED,
- uint32_t patch_offset ATTRIBUTE_UNUSED) {
+ uint32_t patch_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNIMPLEMENTED";
}
std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(
- uint32_t executable_offset ATTRIBUTE_UNUSED) {
+ uint32_t executable_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "UNIMPLEMENTED";
UNREACHABLE();
}
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index e8f57f5..acd49d5 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -92,10 +92,10 @@
static constexpr bool kOatWriterDebugOatCodeLayout = false;
-typedef DexFile::Header __attribute__((aligned(1))) UnalignedDexFileHeader;
+using UnalignedDexFileHeader __attribute__((__aligned__(1))) = DexFile::Header;
const UnalignedDexFileHeader* AsUnalignedDexFileHeader(const uint8_t* raw_data) {
- return reinterpret_cast<const UnalignedDexFileHeader*>(raw_data);
+ return reinterpret_cast<const UnalignedDexFileHeader*>(raw_data);
}
class ChecksumUpdatingOutputStream : public OutputStream {
@@ -1671,7 +1671,7 @@
}
}
- virtual bool VisitComplete() {
+ bool VisitComplete() override {
offset_ = writer_->relative_patcher_->WriteThunks(out_, offset_);
if (UNLIKELY(offset_ == 0u)) {
PLOG(ERROR) << "Failed to write final relative call thunks";
@@ -2405,7 +2405,7 @@
if (static_cast<uint32_t>(new_offset) != expected_file_offset) {
PLOG(ERROR) << "Failed to seek to oat code section. Actual: " << new_offset
<< " Expected: " << expected_file_offset << " File: " << out->GetLocation();
- return 0;
+ return false;
}
DCHECK_OFFSET();
@@ -2815,21 +2815,10 @@
return true;
}
-bool OatWriter::WriteHeader(OutputStream* out,
- uint32_t image_file_location_oat_checksum,
- uintptr_t image_file_location_oat_begin,
- int32_t image_patch_delta) {
+bool OatWriter::WriteHeader(OutputStream* out, uint32_t image_file_location_oat_checksum) {
CHECK(write_state_ == WriteState::kWriteHeader);
oat_header_->SetImageFileLocationOatChecksum(image_file_location_oat_checksum);
- oat_header_->SetImageFileLocationOatDataBegin(image_file_location_oat_begin);
- if (GetCompilerOptions().IsBootImage()) {
- CHECK_EQ(image_patch_delta, 0);
- CHECK_EQ(oat_header_->GetImagePatchDelta(), 0);
- } else {
- CHECK_ALIGNED(image_patch_delta, kPageSize);
- oat_header_->SetImagePatchDelta(image_patch_delta);
- }
oat_header_->UpdateChecksumWithHeaderData();
const size_t file_offset = oat_data_offset_;
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 5202d39..c049518 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -198,10 +198,7 @@
// Check the size of the written oat file.
bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
// Write the oat header. This finalizes the oat file.
- bool WriteHeader(OutputStream* out,
- uint32_t image_file_location_oat_checksum,
- uintptr_t image_file_location_oat_begin,
- int32_t image_patch_delta);
+ bool WriteHeader(OutputStream* out, uint32_t image_file_location_oat_checksum);
// Returns whether the oat file has an associated image.
bool HasImage() const {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index bd09f23..7382208 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -87,7 +87,7 @@
void SetupCompiler(const std::vector<std::string>& compiler_options) {
std::string error_msg;
if (!compiler_options_->ParseCompilerOptions(compiler_options,
- false /* ignore_unrecognized */,
+ /*ignore_unrecognized=*/ false,
&error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
@@ -176,7 +176,7 @@
oat_rodata,
&key_value_store,
verify,
- /* update_input_vdex */ false,
+ /*update_input_vdex=*/ false,
CopyOption::kOnlyIfCompressed,
&opened_dex_files_maps,
&opened_dex_files)) {
@@ -235,7 +235,8 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer.WriteHeader(elf_writer->GetStream(), 42U, 4096U, 0)) {
+ if (!oat_writer.WriteHeader(elf_writer->GetStream(),
+ /*image_file_location_oat_checksum=*/ 42U)) {
return false;
}
@@ -403,21 +404,19 @@
if (kCompile) { // OatWriter strips the code, regenerate to compare
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings);
}
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ true,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ true,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount()); // core
ASSERT_EQ(42U, oat_header.GetImageFileLocationOatChecksum());
- ASSERT_EQ(4096U, oat_header.GetImageFileLocationOatDataBegin());
ASSERT_EQ("lue.art", std::string(oat_header.GetStoreValueByKey(OatHeader::kImageLocationKey)));
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
@@ -464,7 +463,7 @@
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(76U, sizeof(OatHeader));
+ EXPECT_EQ(68U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
@@ -522,18 +521,17 @@
tmp_oat.GetFile(),
dex_files,
key_value_store,
- /* verify */ false);
+ /*verify=*/ false);
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat_file != nullptr);
EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -604,14 +602,13 @@
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
+ /*executable=*/ false,
low_4gb,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
if (low_4gb) {
@@ -727,7 +724,7 @@
input_filenames,
key_value_store,
verify,
- /* profile_compilation_info */ nullptr);
+ /*profile_compilation_info=*/ nullptr);
if (verify) {
ASSERT_FALSE(success);
@@ -735,14 +732,13 @@
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -769,7 +765,7 @@
{
// Test using the AddZipDexFileSource() interface with the zip file handle.
- File zip_fd(dup(zip_file.GetFd()), /* check_usage */ false);
+ File zip_fd(dup(zip_file.GetFd()), /*check_usage=*/ false);
ASSERT_NE(-1, zip_fd.Fd());
ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
@@ -785,14 +781,13 @@
ASSERT_TRUE(success);
std::string error_msg;
- std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -843,7 +838,7 @@
oat_file.GetFile(),
input_filenames,
key_value_store,
- /* verify */ false,
+ /*verify=*/ false,
profile_compilation_info.get());
ASSERT_FALSE(success);
}
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index 564cf30..45a4a22 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -79,7 +79,7 @@
void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
const LinkerPatch& patch ATTRIBUTE_UNUSED,
- uint32_t patch_offset ATTRIBUTE_UNUSED) {
+ uint32_t patch_offset ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "Unexpected baker read barrier branch patch.";
}
diff --git a/dex2oat/linker/x86/relative_patcher_x86_base.cc b/dex2oat/linker/x86/relative_patcher_x86_base.cc
index 6a9690d..07cd724 100644
--- a/dex2oat/linker/x86/relative_patcher_x86_base.cc
+++ b/dex2oat/linker/x86/relative_patcher_x86_base.cc
@@ -50,7 +50,7 @@
uint32_t displacement = target_offset - patch_offset;
displacement -= kPcDisplacement; // The base PC is at the end of the 4-byte patch.
- typedef __attribute__((__aligned__(1))) int32_t unaligned_int32_t;
+ using unaligned_int32_t __attribute__((__aligned__(1))) = int32_t;
reinterpret_cast<unaligned_int32_t*>(&(*code)[literal_offset])[0] = displacement;
}
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
index 9633564..c80f6a9 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
@@ -31,7 +31,7 @@
uint32_t displacement = target_offset - patch_offset;
displacement -= kPcDisplacement; // The base PC is at the end of the 4-byte patch.
- typedef __attribute__((__aligned__(1))) int32_t unaligned_int32_t;
+ using unaligned_int32_t __attribute__((__aligned__(1))) = int32_t;
reinterpret_cast<unaligned_int32_t*>(&(*code)[patch.LiteralOffset()])[0] = displacement;
}
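The aligned(1) alias matters because the patch offset can land on any byte boundary inside the code buffer; storing through a plain int32_t* there would assume 4-byte alignment. A small standalone sketch of the same pattern (the function name and surrounding layout are illustrative only):

#include <cstddef>
#include <cstdint>
#include <vector>

using unaligned_int32_t __attribute__((__aligned__(1))) = int32_t;

// Write a 32-bit displacement at an arbitrary byte offset in the code buffer.
void PatchDisplacement(std::vector<uint8_t>* code, size_t offset, int32_t value) {
  // The aligned(1) alias tells the compiler the store may be unaligned, so it
  // emits a safe store instead of assuming natural int32_t alignment.
  *reinterpret_cast<unaligned_int32_t*>(&(*code)[offset]) = value;
}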
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index ac9a9a2..d15bbda 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -17,12 +17,12 @@
cc_defaults {
name: "dexdump_defaults",
+ defaults: ["art_defaults"],
srcs: [
"dexdump_cfg.cc",
"dexdump_main.cc",
"dexdump.cc",
],
- cflags: ["-Wall", "-Werror"],
}
art_cc_binary {
@@ -38,13 +38,13 @@
art_cc_binary {
name: "dexdumps",
- defaults: ["dexdump_defaults"],
+ defaults: [
+ "dexdump_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ ],
host_supported: true,
device_supported: false,
- static_libs: [
- "libdexfile",
- "libartbase",
- ] + art_static_dependencies,
target: {
darwin: {
enabled: false,
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index e9b6402..6b2a1b9 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -69,14 +69,14 @@
/*
* Data types that match the definitions in the VM specification.
*/
-typedef uint8_t u1;
-typedef uint16_t u2;
-typedef uint32_t u4;
-typedef uint64_t u8;
-typedef int8_t s1;
-typedef int16_t s2;
-typedef int32_t s4;
-typedef int64_t s8;
+using u1 = uint8_t;
+using u2 = uint16_t;
+using u4 = uint32_t;
+using u8 = uint64_t;
+using s1 = int8_t;
+using s2 = int16_t;
+using s4 = int32_t;
+using s8 = int64_t;
/*
* Basic information about a field or a method.
@@ -331,7 +331,7 @@
* NULL-terminated.
*/
static void asciify(char* out, const unsigned char* data, size_t len) {
- while (len--) {
+ for (; len != 0u; --len) {
if (*data < 0x20) {
// Could do more here, but we don't need them yet.
switch (*data) {
@@ -751,24 +751,6 @@
}
/*
- * Callback for dumping each positions table entry.
- */
-static bool dumpPositionsCb(void* /*context*/, const DexFile::PositionInfo& entry) {
- fprintf(gOutFile, " 0x%04x line=%d\n", entry.address_, entry.line_);
- return false;
-}
-
-/*
- * Callback for dumping locals table entry.
- */
-static void dumpLocalsCb(void* /*context*/, const DexFile::LocalInfo& entry) {
- const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
- fprintf(gOutFile, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
- entry.start_address_, entry.end_address_, entry.reg_,
- entry.name_, entry.descriptor_, signature);
-}
-
-/*
* Helper for dumpInstruction(), which builds the string
* representation for the index in the given instruction.
* Returns a pointer to a buffer of sufficient size.
@@ -1201,9 +1183,24 @@
// Positions and locals table in the debug info.
bool is_static = (flags & kAccStatic) != 0;
fprintf(gOutFile, " positions : \n");
- pDexFile->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), dumpPositionsCb, nullptr);
+ accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ fprintf(gOutFile, " 0x%04x line=%d\n", entry.address_, entry.line_);
+ return false;
+ });
fprintf(gOutFile, " locals : \n");
- accessor.DecodeDebugLocalInfo(is_static, idx, dumpLocalsCb, nullptr);
+ accessor.DecodeDebugLocalInfo(is_static,
+ idx,
+ [&](const DexFile::LocalInfo& entry) {
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+ fprintf(gOutFile,
+ " 0x%04x - 0x%04x reg=%d %s %s %s\n",
+ entry.start_address_,
+ entry.end_address_,
+ entry.reg_,
+ entry.name_,
+ entry.descriptor_,
+ signature);
+ });
}
/*
@@ -1803,18 +1800,18 @@
// Iterate over all classes.
char* package = nullptr;
const u4 classDefsSize = pDexFile->GetHeader().class_defs_size_;
- for (u4 i = 0; i < classDefsSize; i++) {
- dumpClass(pDexFile, i, &package);
+ for (u4 j = 0; j < classDefsSize; j++) {
+ dumpClass(pDexFile, j, &package);
} // for
// Iterate over all method handles.
- for (u4 i = 0; i < pDexFile->NumMethodHandles(); ++i) {
- dumpMethodHandle(pDexFile, i);
+ for (u4 j = 0; j < pDexFile->NumMethodHandles(); ++j) {
+ dumpMethodHandle(pDexFile, j);
} // for
// Iterate over all call site ids.
- for (u4 i = 0; i < pDexFile->NumCallSiteIds(); ++i) {
- dumpCallSite(pDexFile, i);
+ for (u4 j = 0; j < pDexFile->NumCallSiteIds(); ++j) {
+ dumpCallSite(pDexFile, j);
} // for
// Free the last package allocated.
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index f4a3866..cf0d113 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -37,7 +37,7 @@
/*
* Shows usage.
*/
-static void usage(void) {
+static void usage() {
LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
LOG(ERROR) << gProgName << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-j] [-l layout] [-o outfile]"
" dexfile...\n";
@@ -64,7 +64,7 @@
gOptions.verbose = true;
// Parse all arguments.
- while (1) {
+ while (true) {
const int ic = getopt(argc, argv, "acdefghijl:o:");
if (ic < 0) {
break; // done
diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc
index 3a2d38d..bb6d4a4 100644
--- a/dexdump/dexdump_test.cc
+++ b/dexdump/dexdump_test.cc
@@ -31,7 +31,7 @@
class DexDumpTest : public CommonRuntimeTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// Dogfood our own lib core dex file.
dex_file_ = GetLibCoreDexFileNames()[0];
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 147af0c..24ee5f8 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -32,6 +32,14 @@
static_libs: ["libz"],
}
+cc_defaults {
+ name: "libart-dexlayout_static_base_defaults",
+ static_libs: [
+ "libbase",
+ "libz",
+ ],
+}
+
art_cc_library {
name: "libart-dexlayout",
defaults: [
@@ -53,6 +61,17 @@
},
}
+cc_defaults {
+ name: "libart-dexlayout_static_defaults",
+ defaults: [
+ "libart-dexlayout_static_base_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ ],
+ static_libs: ["libart-dexlayout"],
+}
+
art_cc_library {
name: "libartd-dexlayout",
defaults: [
@@ -67,6 +86,17 @@
}
cc_defaults {
+ name: "libartd-dexlayout_static_defaults",
+ defaults: [
+ "libart-dexlayout_static_base_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ ],
+ static_libs: ["libartd-dexlayout"],
+}
+
+cc_defaults {
name: "dexlayout-defaults",
defaults: ["art_defaults"],
host_supported: true,
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 00fb0af..a04cfb6 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -26,7 +26,7 @@
namespace art {
CompactDexWriter::CompactDexWriter(DexLayout* dex_layout)
- : DexWriter(dex_layout, /*compute_offsets*/ true) {
+ : DexWriter(dex_layout, /*compute_offsets=*/ true) {
CHECK(GetCompactDexLevel() != CompactDexLevel::kCompactDexLevelNone);
}
@@ -36,7 +36,7 @@
CompactDexWriter::Container::Container(bool dedupe_code_items)
: code_item_dedupe_(dedupe_code_items, &data_section_),
- data_item_dedupe_(/*dedupe*/ true, &data_section_) {}
+ data_item_dedupe_(/*enabled=*/ true, &data_section_) {}
uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
const uint32_t start_offset = stream->Tell();
@@ -211,7 +211,7 @@
CompactDexWriter::Deduper::Deduper(bool enabled, DexContainer::Section* section)
: enabled_(enabled),
- dedupe_map_(/*bucket_count*/ 32,
+ dedupe_map_(/*__n=*/ 32,
HashedMemoryRange::HashEqual(section),
HashedMemoryRange::HashEqual(section)) {}
@@ -406,16 +406,16 @@
// Based on: https://source.android.com/devices/tech/dalvik/dex-format
// Since the offsets may not be calculated already, the writing must be done in the correct order.
const uint32_t string_ids_offset = main_stream->Tell();
- WriteStringIds(main_stream, /*reserve_only*/ true);
+ WriteStringIds(main_stream, /*reserve_only=*/ true);
WriteTypeIds(main_stream);
const uint32_t proto_ids_offset = main_stream->Tell();
- WriteProtoIds(main_stream, /*reserve_only*/ true);
+ WriteProtoIds(main_stream, /*reserve_only=*/ true);
WriteFieldIds(main_stream);
WriteMethodIds(main_stream);
const uint32_t class_defs_offset = main_stream->Tell();
- WriteClassDefs(main_stream, /*reserve_only*/ true);
+ WriteClassDefs(main_stream, /*reserve_only=*/ true);
const uint32_t call_site_ids_offset = main_stream->Tell();
- WriteCallSiteIds(main_stream, /*reserve_only*/ true);
+ WriteCallSiteIds(main_stream, /*reserve_only=*/ true);
WriteMethodHandles(main_stream);
if (compute_offsets_) {
@@ -426,7 +426,7 @@
// Write code item first to minimize the space required for encoded methods.
// For cdex, the code items don't depend on the debug info.
- WriteCodeItems(data_stream, /*reserve_only*/ false);
+ WriteCodeItems(data_stream, /*reserve_only=*/ false);
// Sort the debug infos by method index order, this reduces size by ~0.1% by reducing the size of
// the debug info offset table.
@@ -445,19 +445,19 @@
// Write delayed id sections that depend on data sections.
{
Stream::ScopedSeek seek(main_stream, string_ids_offset);
- WriteStringIds(main_stream, /*reserve_only*/ false);
+ WriteStringIds(main_stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(main_stream, proto_ids_offset);
- WriteProtoIds(main_stream, /*reserve_only*/ false);
+ WriteProtoIds(main_stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(main_stream, class_defs_offset);
- WriteClassDefs(main_stream, /*reserve_only*/ false);
+ WriteClassDefs(main_stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(main_stream, call_site_ids_offset);
- WriteCallSiteIds(main_stream, /*reserve_only*/ false);
+ WriteCallSiteIds(main_stream, /*reserve_only=*/ false);
}
// Write the map list.
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 178a4d4..598f7df 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -215,7 +215,7 @@
uint32_t GetOffset() const { return offset_; }
void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
- virtual uint32_t Size() const { return 0U; }
+ virtual uint32_t Size() const = 0;
private:
// Start out unassigned.
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index ca6ff9e..947d3d5 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -115,6 +115,8 @@
return it != collection_.end() ? it->second : nullptr;
}
+ uint32_t Size() const override { return size(); }
+
// Lower case for template interop with std::map.
uint32_t size() const { return collection_.size(); }
std::map<uint32_t, T*>& Collection() { return collection_; }
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index a4c5cda..365171b 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -790,16 +790,16 @@
// Based on: https://source.android.com/devices/tech/dalvik/dex-format
// Since the offsets may not be calculated already, the writing must be done in the correct order.
const uint32_t string_ids_offset = stream->Tell();
- WriteStringIds(stream, /*reserve_only*/ true);
+ WriteStringIds(stream, /*reserve_only=*/ true);
WriteTypeIds(stream);
const uint32_t proto_ids_offset = stream->Tell();
- WriteProtoIds(stream, /*reserve_only*/ true);
+ WriteProtoIds(stream, /*reserve_only=*/ true);
WriteFieldIds(stream);
WriteMethodIds(stream);
const uint32_t class_defs_offset = stream->Tell();
- WriteClassDefs(stream, /*reserve_only*/ true);
+ WriteClassDefs(stream, /*reserve_only=*/ true);
const uint32_t call_site_ids_offset = stream->Tell();
- WriteCallSiteIds(stream, /*reserve_only*/ true);
+ WriteCallSiteIds(stream, /*reserve_only=*/ true);
WriteMethodHandles(stream);
uint32_t data_offset_ = 0u;
@@ -812,13 +812,13 @@
// Write code item first to minimize the space required for encoded methods.
// Reserve code item space since we need the debug offsets to actually write them.
const uint32_t code_items_offset = stream->Tell();
- WriteCodeItems(stream, /*reserve_only*/ true);
+ WriteCodeItems(stream, /*reserve_only=*/ true);
// Write debug info section.
WriteDebugInfoItems(stream);
{
// Actually write code items since debug info offsets are calculated now.
Stream::ScopedSeek seek(stream, code_items_offset);
- WriteCodeItems(stream, /*reserve_only*/ false);
+ WriteCodeItems(stream, /*reserve_only=*/ false);
}
WriteEncodedArrays(stream);
@@ -833,19 +833,19 @@
// Write delayed id sections that depend on data sections.
{
Stream::ScopedSeek seek(stream, string_ids_offset);
- WriteStringIds(stream, /*reserve_only*/ false);
+ WriteStringIds(stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(stream, proto_ids_offset);
- WriteProtoIds(stream, /*reserve_only*/ false);
+ WriteProtoIds(stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(stream, class_defs_offset);
- WriteClassDefs(stream, /*reserve_only*/ false);
+ WriteClassDefs(stream, /*reserve_only=*/ false);
}
{
Stream::ScopedSeek seek(stream, call_site_ids_offset);
- WriteCallSiteIds(stream, /*reserve_only*/ false);
+ WriteCallSiteIds(stream, /*reserve_only=*/ false);
}
// Write the map list.
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 493a8a2..7a849f2 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -294,7 +294,7 @@
{
Options options;
std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file,
- /*eagerly_assign_offsets*/ true,
+ /*eagerly_assign_offsets=*/ true,
options));
sections = dex_ir::GetSortedDexFileSections(header.get(),
dex_ir::SortDirection::kSortDescending);
@@ -321,9 +321,9 @@
// Extract all the dex files from the vdex file.
std::string error_msg;
std::unique_ptr<VdexFile> vdex(VdexFile::Open(vdex_name,
- false /*writeable*/,
- false /*low_4gb*/,
- false /*unquicken */,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+                                                /*unquicken=*/ false,
&error_msg /*out*/));
if (vdex == nullptr) {
std::cerr << "Could not open vdex file "
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 60dd7e4..47ef0a5 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -34,7 +34,7 @@
class DexDiagTest : public CommonRuntimeTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
}
@@ -68,14 +68,13 @@
EXPECT_TRUE(!oat_location.empty());
std::cout << "==" << oat_location << std::endl;
std::string error_msg;
- std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
EXPECT_TRUE(oat != nullptr) << error_msg;
return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 52d355b..8905aa3 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -247,7 +247,7 @@
* NULL-terminated.
*/
static void Asciify(char* out, const unsigned char* data, size_t len) {
- while (len--) {
+ for (; len != 0u; --len) {
if (*data < 0x20) {
// Could do more here, but we don't need them yet.
switch (*data) {
@@ -1037,26 +1037,6 @@
}
/*
- * Callback for dumping each positions table entry.
- */
-static bool DumpPositionsCb(void* context, const DexFile::PositionInfo& entry) {
- FILE* out_file = reinterpret_cast<FILE*>(context);
- fprintf(out_file, " 0x%04x line=%d\n", entry.address_, entry.line_);
- return false;
-}
-
-/*
- * Callback for dumping locals table entry.
- */
-static void DumpLocalsCb(void* context, const DexFile::LocalInfo& entry) {
- const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
- FILE* out_file = reinterpret_cast<FILE*>(context);
- fprintf(out_file, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
- entry.start_address_, entry.end_address_, entry.reg_,
- entry.name_, entry.descriptor_, signature);
-}
-
-/*
* Lookup functions.
*/
static const char* StringDataByIdx(uint32_t idx, dex_ir::Header* header) {
@@ -1112,8 +1092,13 @@
[this](uint32_t idx) {
return StringDataByIdx(idx, this->header_);
},
- DumpPositionsCb,
- out_file_);
+ [&](const DexFile::PositionInfo& entry) {
+ fprintf(out_file_,
+ " 0x%04x line=%d\n",
+ entry.address_,
+ entry.line_);
+ return false;
+ });
}
fprintf(out_file_, " locals : \n");
if (debug_info != nullptr) {
@@ -1144,8 +1129,18 @@
StringDataByTypeIdx(dchecked_integral_cast<uint16_t>(idx),
this->header_);
},
- DumpLocalsCb,
- out_file_);
+ [&](const DexFile::LocalInfo& entry) {
+ const char* signature =
+ entry.signature_ != nullptr ? entry.signature_ : "";
+ fprintf(out_file_,
+ " 0x%04x - 0x%04x reg=%d %s %s %s\n",
+ entry.start_address_,
+ entry.end_address_,
+ entry.reg_,
+ entry.name_,
+ entry.descriptor_,
+ signature);
+ });
}
}
@@ -1559,7 +1554,7 @@
// Overwrite the existing vector with the new ordering, note that the sets of objects are
// equivalent, but the order changes. This is why this is not a memory leak.
// TODO: Consider cleaning this up with a shared_ptr.
- class_datas[class_data_index].release();
+ class_datas[class_data_index].release(); // NOLINT b/117926937
class_datas[class_data_index].reset(class_data);
++class_data_index;
}
@@ -1575,7 +1570,7 @@
// Overwrite the existing vector with the new ordering, note that the sets of objects are
// equivalent, but the order changes. This is why this is not a memory leak.
// TODO: Consider cleaning this up with a shared_ptr.
- class_defs[i].release();
+ class_defs[i].release(); // NOLINT b/117926937
class_defs[i].reset(new_class_def_order[i]);
}
}
@@ -1676,7 +1671,7 @@
// Now we know what order we want the string data, reorder them.
size_t data_index = 0;
for (dex_ir::StringId* string_id : string_ids) {
- string_datas[data_index].release();
+ string_datas[data_index].release(); // NOLINT b/117926937
string_datas[data_index].reset(string_id->DataItem());
++data_index;
}
@@ -1919,10 +1914,10 @@
data_section->Begin(),
data_section->Size(),
location,
- /* checksum */ 0,
- /*oat_dex_file*/ nullptr,
+ /* location_checksum= */ 0,
+ /*oat_dex_file=*/ nullptr,
verify,
- /*verify_checksum*/ false,
+ /*verify_checksum=*/ false,
error_msg));
CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << *error_msg;
@@ -1933,11 +1928,11 @@
// Regenerate output IR to catch any bugs that might happen during writing.
std::unique_ptr<dex_ir::Header> output_header(
dex_ir::DexIrBuilder(*output_dex_file,
- /*eagerly_assign_offsets*/ true,
+ /*eagerly_assign_offsets=*/ true,
GetOptions()));
std::unique_ptr<dex_ir::Header> orig_header(
dex_ir::DexIrBuilder(*dex_file,
- /*eagerly_assign_offsets*/ true,
+ /*eagerly_assign_offsets=*/ true,
GetOptions()));
CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), error_msg)) << *error_msg;
}
@@ -1960,7 +1955,7 @@
const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!dex_file_loader.Open(
- file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
+ file_name, file_name, /* verify= */ true, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
LOG(ERROR) << error_msg;
@@ -1977,7 +1972,7 @@
if (!ProcessDexFile(file_name,
dex_files[i].get(),
i,
- /*dex_container*/ nullptr,
+ /*dex_container=*/ nullptr,
&error_msg)) {
LOG(WARNING) << "Failed to run dex file " << i << " in " << file_name << " : " << error_msg;
}
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 9f73347..d212e71 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -42,11 +42,11 @@
/*
* Shows usage.
*/
-static void Usage(void) {
+static void Usage() {
LOG(ERROR) << "Copyright (C) 2016 The Android Open Source Project\n";
LOG(ERROR) << kProgramName
<< ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
- " [-s] [-t] [-v] [-w directory] dexfile...\n";
+ " [-s] [-t] [-u] [-v] [-w directory] dexfile...\n";
LOG(ERROR) << " -a : display annotations";
LOG(ERROR) << " -b : build dex_ir";
LOG(ERROR) << " -c : verify checksum and exit";
@@ -85,7 +85,7 @@
bool want_usage = false;
// Parse all arguments.
- while (1) {
+ while (true) {
const int ic = getopt(argc, argv, "abcdefghil:o:p:stuvw:x:");
if (ic < 0) {
break; // done
@@ -203,7 +203,7 @@
}
// Create DexLayout instance.
- DexLayout dex_layout(options, profile_info.get(), out_file, /*header*/ nullptr);
+ DexLayout dex_layout(options, profile_info.get(), out_file, /*header=*/ nullptr);
// Process all files supplied on command line.
int result = 0;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 187c687..54157d9 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -298,7 +298,7 @@
for (const std::string& dex_file : GetLibCoreDexFileNames()) {
std::vector<std::string> dexlayout_args =
{ "-w", tmp_dir, "-o", tmp_name, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
return false;
}
std::string dex_file_name = "classes.dex";
@@ -333,8 +333,8 @@
const ArtDexFileLoader dex_file_loader;
bool result = dex_file_loader.Open(input_dex.c_str(),
input_dex,
- /*verify*/ true,
- /*verify_checksum*/ false,
+ /*verify=*/ true,
+ /*verify_checksum=*/ false,
&error_msg,
&dex_files);
@@ -359,7 +359,7 @@
pfi.AddMethodIndex(static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
dex_location,
dex_file->GetLocationChecksum(),
- /*dex_method_idx*/i,
+ /*method_idx=*/i,
dex_file->NumMethodIds());
}
DexCacheResolvedClasses cur_classes(dex_location,
@@ -447,7 +447,7 @@
// -v makes sure that the layout did not corrupt the dex file.
std::vector<std::string> dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
return false;
}
@@ -459,7 +459,7 @@
// -i since the checksum won't match from the first layout.
std::vector<std::string> second_dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, output_dex };
- if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+ if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
return false;
}
@@ -493,7 +493,7 @@
std::string output_dex = tmp_dir + "classes.dex.new";
std::vector<std::string> dexlayout_args = { "-w", tmp_dir, "-o", "/dev/null", input_dex };
- if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option=*/ false)) {
return false;
}
@@ -615,7 +615,7 @@
{ "-a", "-i", "-o", "/dev/null", temp_dex.GetFilename() };
ASSERT_TRUE(DexLayoutExec(&temp_dex,
kDexFileDuplicateOffset,
- nullptr /* profile_file */,
+ /* profile_file= */ nullptr,
dexlayout_args));
}
@@ -624,7 +624,7 @@
std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
ASSERT_TRUE(DexLayoutExec(&temp_dex,
kNullSetRefListElementInputDex,
- nullptr /* profile_file */,
+ /* profile_file= */ nullptr,
dexlayout_args));
}
@@ -666,7 +666,7 @@
std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
ASSERT_TRUE(DexLayoutExec(&temp_dex,
kUnknownTypeDebugInfoInputDex,
- nullptr /* profile_file */,
+ /* profile_file= */ nullptr,
dexlayout_args));
}
@@ -675,7 +675,7 @@
std::vector<std::string> dexlayout_args = { "-o", "/dev/null", temp_dex.GetFilename() };
ASSERT_TRUE(DexLayoutExec(&temp_dex,
kDuplicateCodeItemInputDex,
- nullptr /* profile_file */,
+ /* profile_file= */ nullptr,
dexlayout_args));
}
@@ -734,7 +734,7 @@
};
// -v makes sure that the layout did not corrupt the dex file.
ASSERT_TRUE(DexLayoutExec(&temp_dex,
- /*dex_filename*/ nullptr,
+ /*dex_filename=*/ nullptr,
&profile_file,
dexlayout_args));
ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
@@ -772,7 +772,7 @@
};
// -v makes sure that the layout did not corrupt the dex file.
ASSERT_TRUE(DexLayoutExec(&temp_dex,
- /*dex_filename*/ nullptr,
+ /*dex_filename=*/ nullptr,
&profile_file,
dexlayout_args));
ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
@@ -785,8 +785,8 @@
const std::string input_jar = GetTestDexFileName("ManyMethods");
CHECK(dex_file_loader.Open(input_jar.c_str(),
input_jar.c_str(),
- /*verify*/ true,
- /*verify_checksum*/ true,
+ /*verify=*/ true,
+ /*verify_checksum=*/ true,
&error_msg,
&dex_files)) << error_msg;
ASSERT_EQ(dex_files.size(), 1u);
@@ -800,14 +800,14 @@
// Filter out all the classes other than the one below based on class descriptor.
options.class_filter_.insert("LManyMethods$Strings;");
DexLayout dexlayout(options,
- /*info*/ nullptr,
- /*out_file*/ nullptr,
- /*header*/ nullptr);
+ /*info=*/ nullptr,
+ /*out_file=*/ nullptr,
+ /*header=*/ nullptr);
std::unique_ptr<DexContainer> out;
bool result = dexlayout.ProcessDexFile(
dex_file->GetLocation().c_str(),
dex_file.get(),
- /*dex_file_index*/ 0,
+ /*dex_file_index=*/ 0,
&out,
&error_msg);
ASSERT_TRUE(result) << "Failed to run dexlayout " << error_msg;
@@ -818,10 +818,10 @@
out->GetDataSection()->Begin(),
out->GetDataSection()->Size(),
dex_file->GetLocation().c_str(),
- /* checksum */ 0,
- /*oat_dex_file*/ nullptr,
- /* verify */ true,
- /*verify_checksum*/ false,
+ /* location_checksum= */ 0,
+ /*oat_dex_file=*/ nullptr,
+ /* verify= */ true,
+ /*verify_checksum=*/ false,
&error_msg));
ASSERT_TRUE(output_dex_file != nullptr);
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index bd521ac..217a024 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -14,18 +14,14 @@
art_cc_binary {
name: "dexlist",
+ defaults: ["art_defaults"],
host_supported: true,
srcs: ["dexlist.cc"],
- cflags: ["-Wall", "-Werror"],
shared_libs: [
"libdexfile",
"libartbase",
"libbase"
],
- // TODO: fix b/72216369 and remove the need for this.
- include_dirs: [
- "art/runtime" // dex utils.
- ],
}
art_cc_test {
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index e7eaf30..adb6a54 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -55,9 +55,9 @@
/*
* Data types that match the definitions in the VM specification.
*/
-typedef uint8_t u1;
-typedef uint32_t u4;
-typedef uint64_t u8;
+using u1 = uint8_t;
+using u4 = uint32_t;
+using u8 = uint64_t;
/*
* Returns a newly-allocated string for the "dot version" of the class
@@ -80,19 +80,6 @@
}
/*
- * Positions table callback; we just want to catch the number of the
- * first line in the method, which *should* correspond to the first
- * entry from the table. (Could also use "min" here.)
- */
-static bool positionsCb(void* context, const DexFile::PositionInfo& entry) {
- int* pFirstLine = reinterpret_cast<int *>(context);
- if (*pFirstLine == -1) {
- *pFirstLine = entry.line_;
- }
- return 0;
-}
-
-/*
* Dumps a method.
*/
static void dumpMethod(const DexFile* pDexFile,
@@ -123,9 +110,13 @@
fileName = "(none)";
}
- // Find the first line.
- int firstLine = -1;
- pDexFile->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), positionsCb, &firstLine);
+ // We just want to catch the number of the first line in the method, which *should* correspond to
+ // the first entry from the table.
+ int first_line = -1;
+ accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ first_line = entry.line_;
+ return true; // Early exit since we only want the first line.
+ });
// Method signature.
const Signature signature = pDexFile->GetMethodSignature(pMethodId);
@@ -134,7 +125,7 @@
// Dump actual method information.
fprintf(gOutFile, "0x%08x %d %s %s %s %s %d\n",
insnsOff, accessor.InsnsSizeInCodeUnits() * 2,
- className.get(), methodName, typeDesc, fileName, firstLine);
+ className.get(), methodName, typeDesc, fileName, first_line);
free(typeDesc);
}
@@ -181,7 +172,7 @@
if (!dex_file_loader.OpenAll(reinterpret_cast<const uint8_t*>(content.data()),
content.size(),
fileName,
- /*verify*/ true,
+ /*verify=*/ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -206,7 +197,7 @@
/*
* Shows usage.
*/
-static void usage(void) {
+static void usage() {
LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
LOG(ERROR) << gProgName << ": [-m p.c.m] [-o outfile] dexfile...";
LOG(ERROR) << "";
@@ -221,7 +212,7 @@
memset(&gOptions, 0, sizeof(gOptions));
// Parse all arguments.
- while (1) {
+ while (true) {
const int ic = getopt(argc, argv, "o:m:");
if (ic < 0) {
break; // done
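
The dexlist.cc hunk above replaces the C-style positionsCb callback and its void* context with a lambda whose return value controls iteration: returning true stops the walk after the first position entry, so no "first line already seen" check is needed. A minimal standalone sketch of the same pattern, using a hypothetical VisitPositions walker rather than the real DexFile accessor API:

    #include <functional>
    #include <vector>

    struct PositionInfo { int line_; };

    // Hypothetical table walker: invokes the callback per entry and stops as
    // soon as the callback returns true.
    static void VisitPositions(const std::vector<PositionInfo>& table,
                               const std::function<bool(const PositionInfo&)>& cb) {
      for (const PositionInfo& entry : table) {
        if (cb(entry)) {
          return;
        }
      }
    }

    static int FirstLine(const std::vector<PositionInfo>& table) {
      int first_line = -1;
      VisitPositions(table, [&](const PositionInfo& entry) {
        first_line = entry.line_;
        return true;  // Early exit: only the first entry is needed.
      });
      return first_line;
    }
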
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 68e6713..39e5f8c 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -33,7 +33,7 @@
class DexListTest : public CommonRuntimeTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// Dogfood our own lib core dex file.
dex_file_ = GetLibCoreDexFileNames()[0];
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index 00b8ef2..21cdcf1 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -39,10 +39,8 @@
kDex2OatFromScratch = 1,
kDex2OatForBootImageOat = 2,
kDex2OatForFilterOat = 3,
- kDex2OatForRelocationOat = 4,
- kDex2OatForBootImageOdex = 5,
- kDex2OatForFilterOdex = 6,
- kDex2OatForRelocationOdex = 7,
+ kDex2OatForBootImageOdex = 4,
+ kDex2OatForFilterOdex = 5,
kErrorInvalidArguments = 101,
kErrorCannotCreateRuntime = 102,
@@ -119,10 +117,8 @@
UsageError(" kDex2OatFromScratch = 1");
UsageError(" kDex2OatForBootImageOat = 2");
UsageError(" kDex2OatForFilterOat = 3");
- UsageError(" kDex2OatForRelocationOat = 4");
- UsageError(" kDex2OatForBootImageOdex = 5");
- UsageError(" kDex2OatForFilterOdex = 6");
- UsageError(" kDex2OatForRelocationOdex = 7");
+ UsageError(" kDex2OatForBootImageOdex = 4");
+ UsageError(" kDex2OatForFilterOdex = 5");
UsageError(" kErrorInvalidArguments = 101");
UsageError(" kErrorCannotCreateRuntime = 102");
@@ -255,8 +251,8 @@
std::unique_ptr<OatFileAssistant> oat_file_assistant;
oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
isa_,
- false /*load_executable*/,
- false /*only_load_system_executable*/,
+ /*load_executable=*/ false,
+ /*only_load_system_executable=*/ false,
vdex_fd_,
oat_fd_,
zip_fd_);
@@ -275,11 +271,9 @@
case OatFileAssistant::kDex2OatFromScratch: return kDex2OatFromScratch;
case OatFileAssistant::kDex2OatForBootImage: return kDex2OatForBootImageOat;
case OatFileAssistant::kDex2OatForFilter: return kDex2OatForFilterOat;
- case OatFileAssistant::kDex2OatForRelocation: return kDex2OatForRelocationOat;
case -OatFileAssistant::kDex2OatForBootImage: return kDex2OatForBootImageOdex;
case -OatFileAssistant::kDex2OatForFilter: return kDex2OatForFilterOdex;
- case -OatFileAssistant::kDex2OatForRelocation: return kDex2OatForRelocationOdex;
default:
LOG(ERROR) << "Unknown dexoptNeeded " << dexoptNeeded;
return kErrorUnknownDexOptNeeded;
diff --git a/dexoptanalyzer/dexoptanalyzer_test.cc b/dexoptanalyzer/dexoptanalyzer_test.cc
index 1cbf546..b9116f0 100644
--- a/dexoptanalyzer/dexoptanalyzer_test.cc
+++ b/dexoptanalyzer/dexoptanalyzer_test.cc
@@ -59,10 +59,8 @@
case 1: return OatFileAssistant::kDex2OatFromScratch;
case 2: return OatFileAssistant::kDex2OatForBootImage;
case 3: return OatFileAssistant::kDex2OatForFilter;
- case 4: return OatFileAssistant::kDex2OatForRelocation;
- case 5: return -OatFileAssistant::kDex2OatForBootImage;
- case 6: return -OatFileAssistant::kDex2OatForFilter;
- case 7: return -OatFileAssistant::kDex2OatForRelocation;
+ case 4: return -OatFileAssistant::kDex2OatForBootImage;
+ case 5: return -OatFileAssistant::kDex2OatForFilter;
default: return dexoptanalyzerResult;
}
}
@@ -75,7 +73,7 @@
bool downgrade = false) {
int dexoptanalyzerResult = Analyze(dex_file, compiler_filter, assume_profile_changed);
dexoptanalyzerResult = DexoptanalyzerToOatFileAssistant(dexoptanalyzerResult);
- OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable*/ false);
+ OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable=*/ false);
int assistantResult = oat_file_assistant.GetDexOptNeeded(
compiler_filter, assume_profile_changed, downgrade);
EXPECT_EQ(assistantResult, dexoptanalyzerResult);
@@ -177,9 +175,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kSpeed,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/true);
+ /*with_alternate_image=*/true);
Verify(dex_location, CompilerFilter::kExtract);
Verify(dex_location, CompilerFilter::kQuicken);
@@ -196,9 +192,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kExtract,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/true);
+ /*with_alternate_image=*/true);
Verify(dex_location, CompilerFilter::kExtract);
Verify(dex_location, CompilerFilter::kQuicken);
@@ -214,6 +208,7 @@
Verify(dex_location, CompilerFilter::kExtract);
Verify(dex_location, CompilerFilter::kSpeed);
+ Verify(dex_location, CompilerFilter::kEverything);
}
// Case: We have a stripped DEX file and a PIC ODEX file, but no OAT file.
@@ -222,7 +217,7 @@
std::string odex_location = GetOdexDir() + "/StrippedDexOdexNoOat.odex";
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
// Strip the dex file
Copy(GetStrippedDexSrc1(), dex_location);
@@ -241,7 +236,7 @@
// Create the odex file
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
// Strip the dex file.
Copy(GetStrippedDexSrc1(), dex_location);
@@ -263,8 +258,7 @@
Verify(dex_location, CompilerFilter::kQuicken);
}
-// Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and
-// OAT files both have patch delta of 0.
+// Case: We have a DEX file, an ODEX file and an OAT file.
TEST_F(DexoptAnalyzerTest, OdexOatOverlap) {
std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex";
@@ -280,18 +274,6 @@
Verify(dex_location, CompilerFilter::kSpeed);
}
-// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
-TEST_F(DexoptAnalyzerTest, DexPicOdexNoOat) {
- std::string dex_location = GetScratchDir() + "/DexPicOdexNoOat.jar";
- std::string odex_location = GetOdexDir() + "/DexPicOdexNoOat.odex";
-
- Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
-
- Verify(dex_location, CompilerFilter::kSpeed);
- Verify(dex_location, CompilerFilter::kEverything);
-}
-
// Case: We have a DEX file and a VerifyAtRuntime ODEX file, but no OAT file.
TEST_F(DexoptAnalyzerTest, DexVerifyAtRuntimeOdexNoOat) {
std::string dex_location = GetScratchDir() + "/DexVerifyAtRuntimeOdexNoOat.jar";
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 2ed41c8..262e815 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -41,9 +41,9 @@
} else if (instruction_set == InstructionSet::kArm64) {
return new arm64::DisassemblerArm64(options);
} else if (instruction_set == InstructionSet::kMips) {
- return new mips::DisassemblerMips(options, /* is_o32_abi */ true);
+ return new mips::DisassemblerMips(options, /* is_o32_abi= */ true);
} else if (instruction_set == InstructionSet::kMips64) {
- return new mips::DisassemblerMips(options, /* is_o32_abi */ false);
+ return new mips::DisassemblerMips(options, /* is_o32_abi= */ false);
} else if (instruction_set == InstructionSet::kX86) {
return new x86::DisassemblerX86(options, false);
} else if (instruction_set == InstructionSet::kX86_64) {
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index c1a6f59..94ea006 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -137,12 +137,12 @@
void DisassemblerArm::CustomDisassembler::CustomDisassemblerStream::PrintLiteral(LocationType type,
int32_t offset) {
// Literal offsets are not required to be aligned, so we may need unaligned access.
- typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1)));
- typedef const uint16_t unaligned_uint16_t __attribute__ ((aligned (1)));
- typedef const int32_t unaligned_int32_t __attribute__ ((aligned (1)));
- typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1)));
- typedef const float unaligned_float __attribute__ ((aligned (1)));
- typedef const double unaligned_double __attribute__ ((aligned (1)));
+ using unaligned_int16_t __attribute__((__aligned__(1))) = const int16_t;
+ using unaligned_uint16_t __attribute__((__aligned__(1))) = const uint16_t;
+ using unaligned_int32_t __attribute__((__aligned__(1))) = const int32_t;
+ using unaligned_int64_t __attribute__((__aligned__(1))) = const int64_t;
+ using unaligned_float __attribute__((__aligned__(1))) = const float;
+ using unaligned_double __attribute__((__aligned__(1))) = const double;
// Zeros are used for the LocationType values this function does not care about.
const size_t literal_size[kVst4Location + 1] = {
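
The typedef-to-using conversion above keeps the aligned(1) attribute that tells the compiler a literal may sit at any byte offset, so it emits an unaligned load instead of assuming natural alignment. A small sketch of the idea outside ART, with a hypothetical ReadUnaligned helper (GCC/Clang attribute syntax assumed):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // 1-byte-aligned alias: dereferencing through it is well-defined even when
    // the pointer is not int32_t-aligned.
    using unaligned_int32_t __attribute__((__aligned__(1))) = const int32_t;

    static int32_t ReadUnaligned(const uint8_t* bytes, size_t offset) {
      return *reinterpret_cast<unaligned_int32_t*>(bytes + offset);
    }

    int main() {
      uint8_t buffer[8] = {0x00, 0x78, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00};
      // Prints 0x12345678 on a little-endian machine.
      printf("0x%x\n", static_cast<unsigned>(ReadUnaligned(buffer, 1)));
      return 0;
    }
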
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index ebc18fc..a1edd00 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -26,6 +26,7 @@
#include <unordered_set>
#include <vector>
+#include <android-base/parseint.h>
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
@@ -35,7 +36,7 @@
#include "class_linker.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
-#include "image.h"
+#include "image-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "oat.h"
@@ -761,7 +762,8 @@
std::unordered_set<size_t> dirty_members;
// Examine the members comprising the ArtMethod, computing which members are dirty.
- for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
+ for (const std::pair<const size_t,
+ MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
const size_t offset = p.first;
if (memcmp(base_ptr + offset, remote_bytes + offset, p.second.size_) != 0) {
dirty_members.insert(p.first);
@@ -787,7 +789,8 @@
void DumpDirtyEntries() REQUIRES_SHARED(Locks::mutator_lock_) {
DumpSamplesAndOffsetCount();
os_ << " offset to field map:\n";
- for (const std::pair<size_t, MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
+ for (const std::pair<const size_t,
+ MemberInfo::NameAndSize>& p : member_info_.offset_to_name_size_) {
const size_t offset = p.first;
const size_t size = p.second.size_;
os_ << StringPrintf(" %zu-%zu: ", offset, offset + size - 1)
@@ -1004,7 +1007,7 @@
begin_image_ptr,
RegionCommon<T>::remote_contents_,
base_ptr,
- /*log_dirty_objects*/true);
+ /*log_dirty_objects=*/true);
// Print shared dirty after since it's less important.
if (RegionCommon<T>::GetZygoteDirtyEntryCount() != 0) {
// We only reach this point if both pids were specified. Furthermore,
@@ -1016,7 +1019,7 @@
begin_image_ptr,
RegionCommon<T>::zygote_contents_,
begin_image_ptr,
- /*log_dirty_objects*/false);
+ /*log_dirty_objects=*/false);
}
RegionSpecializedBase<T>::DumpDirtyObjects();
RegionSpecializedBase<T>::DumpDirtyEntries();
@@ -1682,14 +1685,14 @@
if (option.starts_with("--image-diff-pid=")) {
const char* image_diff_pid = option.substr(strlen("--image-diff-pid=")).data();
- if (!ParseInt(image_diff_pid, &image_diff_pid_)) {
+ if (!android::base::ParseInt(image_diff_pid, &image_diff_pid_)) {
*error_msg = "Image diff pid out of range";
return kParseError;
}
} else if (option.starts_with("--zygote-diff-pid=")) {
const char* zygote_diff_pid = option.substr(strlen("--zygote-diff-pid=")).data();
- if (!ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
+ if (!android::base::ParseInt(zygote_diff_pid, &zygote_diff_pid_)) {
*error_msg = "Zygote diff pid out of range";
return kParseError;
}
@@ -1730,7 +1733,7 @@
return kParseOk;
}
- virtual std::string GetUsage() const {
+ std::string GetUsage() const override {
std::string usage;
usage +=
@@ -1760,7 +1763,7 @@
};
struct ImgDiagMain : public CmdlineMain<ImgDiagArgs> {
- virtual bool ExecuteWithRuntime(Runtime* runtime) {
+ bool ExecuteWithRuntime(Runtime* runtime) override {
CHECK(args_ != nullptr);
return DumpImage(runtime,
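
The loops over member_info_.offset_to_name_size_ above now spell out std::pair<const size_t, MemberInfo::NameAndSize>. A map's value_type carries a const key, so a reference to std::pair<size_t, ...> cannot bind directly and the compiler silently materializes a temporary copy per element. A small illustration with a plain std::map standing in for the member-info table (CountLongValues is a made-up example):

    #include <cstddef>
    #include <map>
    #include <string>

    static int CountLongValues(const std::map<size_t, std::string>& m) {
      int count = 0;
      // value_type is std::pair<const size_t, std::string>, so this binds each
      // element by reference with no copy.
      for (const std::pair<const size_t, std::string>& p : m) {
        if (p.second.size() > 8u) {
          ++count;
        }
      }
      // Spelling the type as std::pair<size_t, std::string> would still compile,
      // but every iteration would copy the element into a temporary.
      return count;
    }
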
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index cb40c7d..73df2a2 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -47,7 +47,7 @@
class ImgDiagTest : public CommonRuntimeTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
// We loaded the runtime with an explicit image. Therefore the image space must exist.
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 4ee48da..19f1532 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -29,6 +29,8 @@
"base/hex_dump.cc",
"base/logging.cc",
"base/malloc_arena_pool.cc",
+ "base/membarrier.cc",
+ "base/memfd.cc",
"base/memory_region.cc",
"base/mem_map.cc",
// "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
@@ -79,6 +81,30 @@
export_shared_lib_headers: ["libbase"],
}
+cc_defaults {
+ name: "libartbase_static_base_defaults",
+ static_libs: [
+ "libbase",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libz",
+ "libziparchive",
+ ],
+}
+
+cc_defaults {
+ name: "libartbase_static_defaults",
+ defaults: ["libartbase_static_base_defaults"],
+ static_libs: ["libartbase"],
+}
+
+cc_defaults {
+ name: "libartbased_static_defaults",
+ defaults: ["libartbase_static_base_defaults"],
+ static_libs: ["libartbased"],
+}
+
gensrcs {
name: "art_libartbase_operator_srcs",
cmd: "$(location generate_operator_out) art/libartbase $(in) > $(out)",
@@ -162,6 +188,8 @@
"base/indenter_test.cc",
"base/leb128_test.cc",
"base/logging_test.cc",
+ "base/memfd_test.cc",
+ "base/membarrier_test.cc",
"base/memory_region_test.cc",
"base/mem_map_test.cc",
"base/safe_copy_test.cc",
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index 1bcfe87..6393672 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -30,11 +30,11 @@
MallocAllocator() {}
~MallocAllocator() {}
- void* Alloc(size_t size) {
+ void* Alloc(size_t size) override {
return calloc(sizeof(uint8_t), size);
}
- void Free(void* p) {
+ void Free(void* p) override {
free(p);
}
@@ -49,12 +49,12 @@
NoopAllocator() {}
~NoopAllocator() {}
- void* Alloc(size_t size ATTRIBUTE_UNUSED) {
+ void* Alloc(size_t size ATTRIBUTE_UNUSED) override {
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
UNREACHABLE();
}
- void Free(void* p ATTRIBUTE_UNUSED) {
+ void Free(void* p ATTRIBUTE_UNUSED) override {
// Noop.
}
diff --git a/libartbase/base/arena_bit_vector.cc b/libartbase/base/arena_bit_vector.cc
index c6d8993..138a5df 100644
--- a/libartbase/base/arena_bit_vector.cc
+++ b/libartbase/base/arena_bit_vector.cc
@@ -62,11 +62,11 @@
UNREACHABLE();
}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) override {
return allocator_->Alloc(size, this->Kind());
}
- virtual void Free(void*) {} // Nop.
+ void Free(void*) override {} // Nop.
private:
ArenaBitVectorAllocator(ArenaAlloc* allocator, ArenaAllocKind kind)
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 76f57da..1f1011e 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -85,15 +85,15 @@
// Load a single bit in the region. The bit at offset 0 is the least
// significant bit in the first byte.
- ATTRIBUTE_NO_SANITIZE_ADDRESS // We might touch extra bytes due to the alignment.
- ALWAYS_INLINE bool LoadBit(uintptr_t bit_offset) const {
+ ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
DCHECK_LT(bit_offset, bit_size_);
- size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
- size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
- return ((data_[index] >> shift) & 1) != 0;
+ uint8_t* data = reinterpret_cast<uint8_t*>(data_);
+ size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
+ size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
+ return ((data[index] >> shift) & 1) != 0;
}
- ALWAYS_INLINE void StoreBit(uintptr_t bit_offset, bool value) {
+ ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
DCHECK_LT(bit_offset, bit_size_);
uint8_t* data = reinterpret_cast<uint8_t*>(data_);
size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
diff --git a/libartbase/base/bit_string_test.cc b/libartbase/base/bit_string_test.cc
index 89a71a1..45f4d4e 100644
--- a/libartbase/base/bit_string_test.cc
+++ b/libartbase/base/bit_string_test.cc
@@ -110,17 +110,17 @@
ASSERT_EQ(BitString::kCapacity, 3u);
EXPECT_BITSTRING_STR("BitString[]", bs);
- bs = SetBitStringCharAt(bs, /*i*/0, /*val*/1u);
+ bs = SetBitStringCharAt(bs, /*i=*/0, /*val=*/1u);
EXPECT_BITSTRING_STR("BitString[1]", bs);
- bs = SetBitStringCharAt(bs, /*i*/1, /*val*/2u);
+ bs = SetBitStringCharAt(bs, /*i=*/1, /*val=*/2u);
EXPECT_BITSTRING_STR("BitString[1,2]", bs);
- bs = SetBitStringCharAt(bs, /*i*/2, /*val*/3u);
+ bs = SetBitStringCharAt(bs, /*i=*/2, /*val=*/3u);
EXPECT_BITSTRING_STR("BitString[1,2,3]", bs);
// There should be at least "kCapacity" # of checks here, 1 for each unique position.
- EXPECT_EQ(MakeBitStringChar(/*idx*/0, /*val*/1u), bs[0]);
- EXPECT_EQ(MakeBitStringChar(/*idx*/1, /*val*/2u), bs[1]);
- EXPECT_EQ(MakeBitStringChar(/*idx*/2, /*val*/3u), bs[2]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/0, /*val=*/1u), bs[0]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/1, /*val=*/2u), bs[1]);
+ EXPECT_EQ(MakeBitStringChar(/*idx=*/2, /*val=*/3u), bs[2]);
// Each maximal value should be tested here for each position.
uint32_t max_bitstring_ints[] = {
diff --git a/libartbase/base/bit_struct_test.cc b/libartbase/base/bit_struct_test.cc
index 577682c..a2389eb 100644
--- a/libartbase/base/bit_struct_test.cc
+++ b/libartbase/base/bit_struct_test.cc
@@ -73,7 +73,7 @@
TEST(BitStructs, Custom) {
CustomBitStruct expected(0b1111);
- BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f{};
+ BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f{};
EXPECT_EQ(1u, sizeof(f));
@@ -85,9 +85,9 @@
EXPECT_EQ(AsUint(f), 0b11110000u);
}
-BITSTRUCT_DEFINE_START(TestTwoCustom, /* size */ 8)
- BitStructField<CustomBitStruct, /*lsb*/0, /*width*/4> f4_a;
- BitStructField<CustomBitStruct, /*lsb*/4, /*width*/4> f4_b;
+BITSTRUCT_DEFINE_START(TestTwoCustom, /* size= */ 8)
+ BitStructField<CustomBitStruct, /*lsb=*/0, /*width=*/4> f4_a;
+ BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f4_b;
BITSTRUCT_DEFINE_END(TestTwoCustom);
TEST(BitStructs, TwoCustom) {
@@ -122,7 +122,7 @@
}
TEST(BitStructs, Number) {
- BitStructNumber<uint16_t, /*lsb*/4, /*width*/4> bsn{};
+ BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4> bsn{};
EXPECT_EQ(2u, sizeof(bsn));
bsn = 0b1111;
@@ -135,20 +135,20 @@
EXPECT_EQ(AsUint(bsn), 0b11110000u);
}
-BITSTRUCT_DEFINE_START(TestBitStruct, /* size */ 8)
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size= */ 8)
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/7> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
BITSTRUCT_DEFINE_END(TestBitStruct);
TEST(BitStructs, Test1) {
{
// Check minimal size selection is correct.
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/7> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
EXPECT_EQ(1u, sizeof(i3));
EXPECT_EQ(1u, sizeof(u4));
@@ -216,12 +216,12 @@
}
}
-BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size */ 32)
- BitStructUint</*lsb*/0, /*width*/3> u3;
- BitStructUint</*lsb*/3, /*width*/10> u10;
- BitStructUint</*lsb*/13, /*width*/19> u19;
+BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size= */ 32)
+ BitStructUint</*lsb=*/0, /*width=*/3> u3;
+ BitStructUint</*lsb=*/3, /*width=*/10> u10;
+ BitStructUint</*lsb=*/13, /*width=*/19> u19;
- BitStructUint</*lsb*/0, /*width*/32> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/32> alias_all;
BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
// static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
@@ -255,11 +255,11 @@
EXPECT_EQ(0b10101010101010101011111010100111u, AsUint(tst));
}
-BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size */ 8)
- BitStructInt</*lsb*/0, /*width*/3> i3;
- BitStructUint</*lsb*/3, /*width*/4> u4;
+BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size= */ 8)
+ BitStructInt</*lsb=*/0, /*width=*/3> i3;
+ BitStructUint</*lsb=*/3, /*width=*/4> u4;
- BitStructUint</*lsb*/0, /*width*/8> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/8> alias_all;
BITSTRUCT_DEFINE_END(TestBitStruct_u8);
TEST(BitStructs, FieldAssignment) {
@@ -283,11 +283,11 @@
}
}
-BITSTRUCT_DEFINE_START(NestedStruct, /* size */ 64)
- BitStructField<MixedSizeBitStruct, /*lsb*/0> mixed_lower;
- BitStructField<MixedSizeBitStruct, /*lsb*/32> mixed_upper;
+BITSTRUCT_DEFINE_START(NestedStruct, /* size= */ 64)
+ BitStructField<MixedSizeBitStruct, /*lsb=*/0> mixed_lower;
+ BitStructField<MixedSizeBitStruct, /*lsb=*/32> mixed_upper;
- BitStructUint</*lsb*/0, /*width*/64> alias_all;
+ BitStructUint</*lsb=*/0, /*width=*/64> alias_all;
BITSTRUCT_DEFINE_END(NestedStruct);
TEST(BitStructs, NestedFieldAssignment) {
diff --git a/libartbase/base/bit_utils_test.cc b/libartbase/base/bit_utils_test.cc
index 3a80600..91fc3b0 100644
--- a/libartbase/base/bit_utils_test.cc
+++ b/libartbase/base/bit_utils_test.cc
@@ -353,89 +353,92 @@
static_assert(MaskLeastSignificant<uint64_t>(63) == (std::numeric_limits<uint64_t>::max() >> 1u),
"TestMaskLeastSignificant#6");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/0) == 0xFF, "TestBitFieldClear#1");
-static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/0) == 0xFF, "TestBitFieldClear#1");
+static_assert(BitFieldClear(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
"TestBitFieldClear#2");
-static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32) == 0x0,
+static_assert(BitFieldClear(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32) == 0x0,
"TestBitFieldClear#3");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/2) == 0b11111100, "TestBitFieldClear#4");
-static_assert(BitFieldClear(0xFF, /*lsb*/0, /*width*/3) == 0b11111000, "TestBitFieldClear#5");
-static_assert(BitFieldClear(0xFF, /*lsb*/1, /*width*/3) == 0b11110001, "TestBitFieldClear#6");
-static_assert(BitFieldClear(0xFF, /*lsb*/2, /*width*/3) == 0b11100011, "TestBitFieldClear#7");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/2) == 0b11111100, "TestBitFieldClear#4");
+static_assert(BitFieldClear(0xFF, /*lsb=*/0, /*width=*/3) == 0b11111000, "TestBitFieldClear#5");
+static_assert(BitFieldClear(0xFF, /*lsb=*/1, /*width=*/3) == 0b11110001, "TestBitFieldClear#6");
+static_assert(BitFieldClear(0xFF, /*lsb=*/2, /*width=*/3) == 0b11100011, "TestBitFieldClear#7");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/0) == 0x0, "TestBitFieldExtract#1");
-static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/0) == 0x0, "TestBitFieldExtract#1");
+static_assert(BitFieldExtract(std::numeric_limits<uint32_t>::max(), /*lsb=*/0, /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldExtract#2");
-static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb*/0, /*width*/32)
+static_assert(BitFieldExtract(std::numeric_limits<int32_t>::max(), /*lsb=*/0, /*width=*/32)
== std::numeric_limits<int32_t>::max(),
"TestBitFieldExtract#3");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/2) == 0b00000011,
"TestBitFieldExtract#4");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/0, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#5");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/1, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/1, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#6");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/2, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/2, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#7");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/3, /*width*/3) == 0b00000111,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/3, /*width=*/3) == 0b00000111,
"TestBitFieldExtract#8");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/8, /*width*/3) == 0b00000000,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/8, /*width=*/3) == 0b00000000,
"TestBitFieldExtract#9");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/7, /*width*/3) == 0b00000001,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/7, /*width=*/3) == 0b00000001,
"TestBitFieldExtract#10");
-static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb*/6, /*width*/3) == 0b00000011,
+static_assert(BitFieldExtract(static_cast<uint32_t>(0xFF), /*lsb=*/6, /*width=*/3) == 0b00000011,
"TestBitFieldExtract#11");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/2) == -1, "TestBitFieldExtract#12");
-static_assert(BitFieldExtract(0xFF, /*lsb*/0, /*width*/3) == -1, "TestBitFieldExtract#13");
-static_assert(BitFieldExtract(0xFF, /*lsb*/1, /*width*/3) == -1, "TestBitFieldExtract#14");
-static_assert(BitFieldExtract(0xFF, /*lsb*/2, /*width*/3) == -1, "TestBitFieldExtract#15");
-static_assert(BitFieldExtract(0xFF, /*lsb*/3, /*width*/3) == -1, "TestBitFieldExtract#16");
-static_assert(BitFieldExtract(0xFF, /*lsb*/8, /*width*/3) == 0b00000000, "TestBitFieldExtract#17");
-static_assert(BitFieldExtract(0xFF, /*lsb*/7, /*width*/3) == 0b00000001, "TestBitFieldExtract#18");
-static_assert(BitFieldExtract(0xFF, /*lsb*/6, /*width*/3) == 0b00000011, "TestBitFieldExtract#19");
-static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/2) == -1, "TestBitFieldExtract#12");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/0, /*width=*/3) == -1, "TestBitFieldExtract#13");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/1, /*width=*/3) == -1, "TestBitFieldExtract#14");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/2, /*width=*/3) == -1, "TestBitFieldExtract#15");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/3, /*width=*/3) == -1, "TestBitFieldExtract#16");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/8, /*width=*/3) == 0b00000000,
+ "TestBitFieldExtract#17");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/7, /*width=*/3) == 0b00000001,
+ "TestBitFieldExtract#18");
+static_assert(BitFieldExtract(0xFF, /*lsb=*/6, /*width=*/3) == 0b00000011,
+ "TestBitFieldExtract#19");
+static_assert(BitFieldExtract(static_cast<uint8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
== 0b00001010,
"TestBitFieldExtract#20");
-static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb*/2, /*width*/4)
+static_assert(BitFieldExtract(static_cast<int8_t>(0b01101010), /*lsb=*/2, /*width=*/4)
== static_cast<int8_t>(0b11111010),
"TestBitFieldExtract#21");
-static_assert(BitFieldInsert(0xFF, /*data*/0x0, /*lsb*/0, /*width*/0) == 0xFF,
+static_assert(BitFieldInsert(0xFF, /*data=*/0x0, /*lsb=*/0, /*width=*/0) == 0xFF,
"TestBitFieldInsert#1");
static_assert(BitFieldInsert(std::numeric_limits<uint32_t>::max(),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#2");
static_assert(BitFieldInsert(std::numeric_limits<int32_t>::max(),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#3");
static_assert(BitFieldInsert(0u,
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#4");
static_assert(BitFieldInsert(-(-0),
- /*data*/std::numeric_limits<uint32_t>::max(),
- /*lsb*/0,
- /*width*/32)
+ /*data=*/std::numeric_limits<uint32_t>::max(),
+ /*lsb=*/0,
+ /*width=*/32)
== std::numeric_limits<uint32_t>::max(),
"TestBitFieldInsert#5");
-static_assert(BitFieldInsert(0x00, /*data*/0b11u, /*lsb*/0, /*width*/2) == 0b00000011,
+static_assert(BitFieldInsert(0x00, /*data=*/0b11u, /*lsb=*/0, /*width=*/2) == 0b00000011,
"TestBitFieldInsert#6");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/0, /*width*/3) == 0b00000111,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/0, /*width=*/3) == 0b00000111,
"TestBitFieldInsert#7");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/1, /*width*/3) == 0b00001110,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/1, /*width=*/3) == 0b00001110,
"TestBitFieldInsert#8");
-static_assert(BitFieldInsert(0x00, /*data*/0b111u, /*lsb*/2, /*width*/3) == 0b00011100,
+static_assert(BitFieldInsert(0x00, /*data=*/0b111u, /*lsb=*/2, /*width=*/3) == 0b00011100,
"TestBitFieldInsert#9");
-static_assert(BitFieldInsert(0b01011100, /*data*/0b1101u, /*lsb*/4, /*width*/4) == 0b11011100,
+static_assert(BitFieldInsert(0b01011100, /*data=*/0b1101u, /*lsb=*/4, /*width=*/4) == 0b11011100,
"TestBitFieldInsert#10");
template <typename Container>
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 6dd2381..9485fca 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -73,11 +73,11 @@
file_.reset(file);
}
-ScratchFile::ScratchFile(ScratchFile&& other) {
+ScratchFile::ScratchFile(ScratchFile&& other) noexcept {
*this = std::move(other);
}
-ScratchFile& ScratchFile::operator=(ScratchFile&& other) {
+ScratchFile& ScratchFile::operator=(ScratchFile&& other) noexcept {
if (GetFile() != other.GetFile()) {
std::swap(filename_, other.filename_);
std::swap(file_, other.file_);
@@ -251,7 +251,7 @@
static constexpr bool kVerifyChecksum = true;
const ArtDexFileLoader dex_file_loader;
if (!dex_file_loader.Open(
- location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
+ location, location, /* verify= */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index d645fa1..32a2628 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -54,9 +54,9 @@
ScratchFile(const ScratchFile& other, const char* suffix);
- ScratchFile(ScratchFile&& other);
+ ScratchFile(ScratchFile&& other) noexcept;
- ScratchFile& operator=(ScratchFile&& other);
+ ScratchFile& operator=(ScratchFile&& other) noexcept;
explicit ScratchFile(File* file);
diff --git a/libartbase/base/file_magic.cc b/libartbase/base/file_magic.cc
index d8d843b..1471c59 100644
--- a/libartbase/base/file_magic.cc
+++ b/libartbase/base/file_magic.cc
@@ -31,7 +31,7 @@
File OpenAndReadMagic(const char* filename, uint32_t* magic, std::string* error_msg) {
CHECK(magic != nullptr);
- File fd(filename, O_RDONLY, /* check_usage */ false);
+ File fd(filename, O_RDONLY, /* check_usage= */ false);
if (fd.Fd() == -1) {
*error_msg = StringPrintf("Unable to open '%s' : %s", filename, strerror(errno));
return File();
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index a63f326..1d106b2 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -279,4 +279,12 @@
return android::base::StartsWith(full_path, framework_path);
}
+int DupCloexec(int fd) {
+#if defined(__linux__)
+ return fcntl(fd, F_DUPFD_CLOEXEC, 0);
+#else
+ return dup(fd);
+#endif
+}
+
} // namespace art
diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h
index 063393b..c249bcc 100644
--- a/libartbase/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -78,6 +78,9 @@
// Return whether the location is on system/framework (i.e. android_root/framework).
bool LocationIsOnSystemFramework(const char* location);
+// dup(2), except setting the O_CLOEXEC flag atomically, when possible.
+int DupCloexec(int fd);
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_FILE_UTILS_H_
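
DupCloexec (declared above, defined in file_utils.cc) duplicates a descriptor without letting it leak across exec: on Linux it uses fcntl(F_DUPFD_CLOEXEC) so the close-on-exec flag is set atomically, elsewhere it falls back to plain dup. A standalone sketch of the same idea, with a made-up main for illustration:

    #include <fcntl.h>
    #include <unistd.h>

    // Same logic as art::DupCloexec, copied here so the sketch is self-contained.
    static int DupCloexec(int fd) {
    #if defined(__linux__)
      return fcntl(fd, F_DUPFD_CLOEXEC, 0);
    #else
      return dup(fd);
    #endif
    }

    int main() {
      int fd = open("/dev/null", O_RDONLY);
      int dup_fd = DupCloexec(fd);  // The duplicate is closed automatically on exec.
      if (dup_fd != -1) {
        close(dup_fd);
      }
      if (fd != -1) {
        close(fd);
      }
      return 0;
    }
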
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 2a7273b..f7c9c5e 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -71,12 +71,12 @@
// Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
UniqueCPtr<char> root_dup(strdup(android_root_env.c_str()));
char* dir = dirname(root_dup.get());
- ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, /* overwrite */ 1));
std::string android_root2 = GetAndroidRootSafe(&error_msg);
EXPECT_STREQ(dir, android_root2.c_str());
// Set a bogus value for ANDROID_ROOT. This should be an error.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
EXPECT_EQ(GetAndroidRootSafe(&error_msg), "");
// Unset ANDROID_ROOT and see that it still returns something (as libart code is running).
@@ -90,7 +90,7 @@
// Reset ANDROID_ROOT, as other things may depend on it.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), 1 /* overwrite */));
+ ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), /* overwrite */ 1));
}
TEST_F(FileUtilsTest, ReplaceFileExtension) {
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index bc79ff2..2a2a737 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -123,9 +123,12 @@
// True if we allow moving classes.
static constexpr bool kMovingClasses = !kMarkCompactSupport;
// If true, enable generational collection when using the Concurrent Copying
-// collector, i.e. use sticky-bit CC for minor collections and (full) CC for
-// major collections.
-#ifdef ART_USE_GENERATIONAL_CC
+// (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
+// for major collections.
+//
+// Generational CC collection is currently only compatible with Baker read
+// barriers.
+#if defined(ART_USE_GENERATIONAL_CC) && defined(ART_READ_BARRIER_TYPE_IS_BAKER)
static constexpr bool kEnableGenerationalConcurrentCopyingCollection = true;
#else
static constexpr bool kEnableGenerationalConcurrentCopyingCollection = false;
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 33866bb..323fa4e 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -42,12 +42,21 @@
private: \
void* operator new(size_t) = delete // NOLINT
-#define OFFSETOF_MEMBER(t, f) \
- (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u)) // NOLINT
+// offsetof is not defined by the spec for types with non-standard layout;
+// however, compilers implement it in practice.
+// (Note that reinterpret_cast is not valid in a constant expression.)
+//
+// An alternative approach would be something like:
+// #define OFFSETOF_HELPER(t, f) \
+// (reinterpret_cast<uintptr_t>(&reinterpret_cast<t*>(16)->f) - static_cast<uintptr_t>(16u))
+// #define OFFSETOF_MEMBER(t, f) \
+// (__builtin_constant_p(OFFSETOF_HELPER(t,f)) ? OFFSETOF_HELPER(t,f) : OFFSETOF_HELPER(t,f))
+#define OFFSETOF_MEMBER(t, f) offsetof(t, f)
#define OFFSETOF_MEMBERPTR(t, f) \
(reinterpret_cast<uintptr_t>(&(reinterpret_cast<t*>(16)->*f)) - static_cast<uintptr_t>(16)) // NOLINT
+#define ALIGNED(x) __attribute__ ((__aligned__(x)))
#define PACKED(x) __attribute__ ((__aligned__(x), __packed__))
// Stringify the argument.
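
With OFFSETOF_MEMBER now defined in terms of offsetof, the result is a constant expression usable in static_assert, even though the standard only guarantees offsetof for standard-layout types (the comment above notes compilers accept more in practice). A minimal illustration on a made-up Header struct, not an ART type:

    #include <cstddef>
    #include <cstdint>

    #define OFFSETOF_MEMBER(t, f) offsetof(t, f)

    struct Header {
      uint32_t magic;
      uint16_t version;
      uint16_t flags;
    };

    // Valid at compile time because offsetof yields a constant expression.
    static_assert(OFFSETOF_MEMBER(Header, version) == 4u, "unexpected Header layout");
    static_assert(OFFSETOF_MEMBER(Header, flags) == 6u, "unexpected Header layout");
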
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 1bf553d..532ca28 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -394,7 +394,7 @@
return Invalid();
}
const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
- return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+ return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, /* reuse= */ true);
}
template<typename A, typename B>
@@ -585,7 +585,7 @@
redzone_size);
}
-MemMap::MemMap(MemMap&& other)
+MemMap::MemMap(MemMap&& other) noexcept
: MemMap() {
swap(other);
}
@@ -692,6 +692,24 @@
int tail_prot,
std::string* error_msg,
bool use_debug_name) {
+ return RemapAtEnd(new_end,
+ tail_name,
+ tail_prot,
+ MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
+ /* fd= */ -1,
+ /* offset= */ 0,
+ error_msg,
+ use_debug_name);
+}
+
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ int flags,
+ int fd,
+ off_t offset,
+ std::string* error_msg,
+ bool use_debug_name) {
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -715,9 +733,6 @@
DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
DCHECK_ALIGNED(tail_base_size, kPageSize);
- unique_fd fd;
- int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
-
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
// removes old mappings for the overlapping region. This makes the operation atomic
@@ -726,13 +741,13 @@
tail_base_size,
tail_prot,
flags,
- fd.get(),
- 0));
+ fd,
+ offset));
if (actual == MAP_FAILED) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
- *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+ *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
"maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
- fd.get());
+ fd);
return Invalid();
}
// Update *this.
@@ -756,7 +771,7 @@
uint8_t* begin = Begin();
ReleaseReservedMemory(byte_count); // Performs necessary DCHECK()s on this reservation.
size_t base_size = RoundUp(byte_count, kPageSize);
- return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+ return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse= */ false);
}
void MemMap::ReleaseReservedMemory(size_t byte_count) {
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 20eda32..4f92492 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -68,8 +68,8 @@
return MemMap();
}
- MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
- MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+ MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
+ MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
Reset();
swap(other);
return *this;
@@ -261,6 +261,16 @@
std::string* error_msg,
bool use_debug_name = true);
+ // Unmap the pages at the end of this mapping and remap them, using the given flags, file
+ // descriptor and offset, to create another memory map.
+ MemMap RemapAtEnd(uint8_t* new_end,
+ const char* tail_name,
+ int tail_prot,
+ int tail_flags,
+ int fd,
+ off_t offset,
+ std::string* error_msg,
+ bool use_debug_name = true);
+
// Take ownership of pages at the beginning of the mapping. The mapping must be an
// anonymous reservation mapping, owning entire pages. The `byte_count` must not
// exceed the size of this reservation.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index ab3d18f..5815cf9 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -53,7 +53,7 @@
// Find a valid map address and unmap it before returning.
std::string error_msg;
MemMap map = MemMap::MapAnonymous("temp",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
size,
PROT_READ,
low_4gb,
@@ -68,7 +68,7 @@
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a two-page memory region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
2 * page_size,
PROT_READ | PROT_WRITE,
low_4gb,
@@ -165,17 +165,17 @@
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
void* source_addr = source.Begin();
@@ -200,21 +200,21 @@
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
5 * kPageSize, // Need to make it larger
// initially so we know
// there won't be mappings
// in the way we move
// source.
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -246,17 +246,17 @@
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
std::string error_msg;
MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
uint8_t* source_addr = source.Begin();
@@ -285,11 +285,11 @@
MemMap dest =
MemMap::MapAnonymous(
"MapAnonymousEmpty-atomic-replace-dest",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
// the way we move source.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(dest.IsValid());
// Resize down to 1 page so we can remap the rest.
@@ -299,7 +299,7 @@
dest.Begin() + kPageSize,
2 * kPageSize,
PROT_WRITE | PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(source.IsValid());
ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -332,20 +332,20 @@
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
0,
PROT_READ,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -358,7 +358,7 @@
reinterpret_cast<uint8_t*>(kPageSize),
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
nullptr);
ASSERT_FALSE(map.IsValid());
}
@@ -368,20 +368,20 @@
CommonInit();
std::string error_msg;
MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
0,
PROT_READ,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid()) << error_msg;
ASSERT_FALSE(error_msg.empty());
error_msg.clear();
map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -394,12 +394,12 @@
constexpr size_t kMapSize = kPageSize;
std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
- MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+ MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /*start*/0,
- /*low_4gb*/true,
+ /*start=*/0,
+ /*low_4gb=*/true,
scratch_file.GetFilename().c_str(),
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
@@ -413,23 +413,23 @@
CommonInit();
std::string error_msg;
// Find a valid address.
- uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
+ uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
// Map at an address that should work, which should succeed.
MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
valid_address,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
ASSERT_TRUE(map0.BaseBegin() == valid_address);
// Map at an unspecified address, which should succeed.
MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -439,7 +439,7 @@
reinterpret_cast<uint8_t*>(map1.BaseBegin()),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map2.IsValid()) << error_msg;
ASSERT_TRUE(!error_msg.empty());
@@ -455,6 +455,53 @@
}
#endif
+TEST_F(MemMapTest, RemapFileViewAtEnd) {
+ CommonInit();
+ std::string error_msg;
+ ScratchFile scratch_file;
+
+ // Create a scratch file 3 pages large.
+ constexpr size_t kMapSize = 3 * kPageSize;
+ std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+ memset(data.get(), 1, kPageSize);
+ memset(&data[0], 0x55, kPageSize);
+ memset(&data[kPageSize], 0x5a, kPageSize);
+ memset(&data[2 * kPageSize], 0xaa, kPageSize);
+ ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+ MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /*start=*/0,
+ /*low_4gb=*/true,
+ scratch_file.GetFilename().c_str(),
+ &error_msg);
+ ASSERT_TRUE(map.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map.Size(), kMapSize);
+ ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
+ ASSERT_EQ(data[0], *map.Begin());
+ ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
+ ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
+
+ for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
+ MemMap tail = map.RemapAtEnd(map.Begin() + offset,
+ "bad_offset_map",
+ PROT_READ,
+ MAP_PRIVATE | MAP_FIXED,
+ scratch_file.GetFd(),
+ offset,
+ &error_msg);
+ ASSERT_TRUE(tail.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(offset, map.Size());
+ ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
+ ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
+ ASSERT_EQ(data[offset], *tail.Begin());
+ }
+}
+
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// Some MIPS32 hardware (namely the Creator Ci20 development board)
// cannot allocate in the 2GB-4GB region.
@@ -475,7 +522,7 @@
reinterpret_cast<uint8_t*>(start_addr),
size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ true,
+ /*low_4gb=*/ true,
&error_msg);
if (map.IsValid()) {
break;
@@ -496,7 +543,7 @@
reinterpret_cast<uint8_t*>(ptr),
2 * kPageSize, // brings it over the top.
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -511,7 +558,7 @@
reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -524,7 +571,7 @@
reinterpret_cast<uint8_t*>(0xF0000000),
0x20000000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
+ /* low_4gb= */ true,
&error_msg);
ASSERT_FALSE(map.IsValid());
ASSERT_FALSE(error_msg.empty());
@@ -538,9 +585,9 @@
nullptr,
0x20000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ false,
+ /* reservation= */ nullptr,
&error_msg);
ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -548,9 +595,9 @@
reinterpret_cast<uint8_t*>(map.BaseBegin()),
0x10000,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
- /* reuse */ true,
- /* reservation */ nullptr,
+ /* low_4gb= */ false,
+ /* reuse= */ true,
+ /* reservation= */ nullptr,
&error_msg);
ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -562,10 +609,10 @@
constexpr size_t kNumPages = 3;
// Map a 3-page mem map.
MemMap map = MemMap::MapAnonymous("MapAnonymous0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kPageSize * kNumPages,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -580,7 +627,7 @@
map_base,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map0.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -588,7 +635,7 @@
map_base + kPageSize,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -596,7 +643,7 @@
map_base + kPageSize * 2,
kPageSize,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
ASSERT_TRUE(error_msg.empty());
@@ -625,10 +672,10 @@
const size_t page_size = static_cast<size_t>(kPageSize);
// Map a region.
MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
14 * page_size,
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(m0.IsValid());
uint8_t* base0 = m0.Begin();
@@ -731,10 +778,10 @@
ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
MemMap reservation = MemMap::MapAnonymous("Test reservation",
- /* addr */ nullptr,
+ /* addr= */ nullptr,
kMapSize,
PROT_NONE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
&error_msg);
ASSERT_TRUE(reservation.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -744,14 +791,14 @@
static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
uint8_t* addr1 = reservation.Begin();
MemMap map1 = MemMap::MapFileAtAddress(addr1,
- /* byte_count */ kChunk1Size,
+ /* byte_count= */ kChunk1Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start */ 0,
- /* low_4gb */ false,
+ /* start= */ 0,
+ /* low_4gb= */ false,
scratch_file.GetFilename().c_str(),
- /* reuse */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map1.IsValid()) << error_msg;
@@ -769,10 +816,10 @@
uint8_t* addr2 = reservation.Begin();
MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
addr2,
- /* byte_count */ kChunk2Size,
+ /* byte_count= */ kChunk2Size,
PROT_READ,
- /* low_4gb */ false,
- /* reuse */ false,
+ /* low_4gb= */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map2.IsValid()) << error_msg;
@@ -786,14 +833,14 @@
const size_t kChunk3Size = reservation.Size() - 1u;
uint8_t* addr3 = reservation.Begin();
MemMap map3 = MemMap::MapFileAtAddress(addr3,
- /* byte_count */ kChunk3Size,
+ /* byte_count= */ kChunk3Size,
PROT_READ,
MAP_PRIVATE,
scratch_file.GetFd(),
- /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
- /* low_4gb */ false,
+ /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /* low_4gb= */ false,
scratch_file.GetFilename().c_str(),
- /* reuse */ false,
+ /* reuse= */ false,
&reservation,
&error_msg);
ASSERT_TRUE(map3.IsValid()) << error_msg;
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
new file mode 100644
index 0000000..490dbf3
--- /dev/null
+++ b/libartbase/base/membarrier.cc
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "membarrier.h"
+
+#include <errno.h>
+
+#include <sys/syscall.h>
+#include <unistd.h>
+#include "macros.h"
+
+#if defined(__BIONIC__)
+
+#include <atomic>
+#include <android/get_device_api_level.h>
+#include <linux/membarrier.h>
+
+#define CHECK_MEMBARRIER_CMD(art_value, membarrier_value) \
+ static_assert(static_cast<int>(art_value) == membarrier_value, "Bad value for " # art_value)
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kQuery, MEMBARRIER_CMD_QUERY);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kGlobal, MEMBARRIER_CMD_SHARED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kRegisterPrivateExpedited,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED);
+CHECK_MEMBARRIER_CMD(art::MembarrierCommand::kPrivateExpedited, MEMBARRIER_CMD_PRIVATE_EXPEDITED);
+#undef CHECK_MEMBARRIER_CMD
+
+#endif // __BIONIC__
+
+namespace art {
+
+#if defined(__NR_membarrier)
+
+int membarrier(MembarrierCommand command) {
+#if defined(__BIONIC__)
+ // Avoid calling membarrier on older Android versions where membarrier may be barred by seccomp,
+ // causing the current process to be killed. The probing here could be considered expensive, so
+ // the result is cached in a static to avoid repeating it.
+ static int api_level = android_get_device_api_level();
+ if (api_level < __ANDROID_API_Q__) {
+ errno = ENOSYS;
+ return -1;
+ }
+#endif // __BIONIC__
+ return syscall(__NR_membarrier, static_cast<int>(command), 0);
+}
+
+#else // __NR_membarrier
+
+int membarrier(MembarrierCommand command ATTRIBUTE_UNUSED) {
+ // In principle this could be supported on Linux, but Android's prebuilt glibc does not include
+ // the system call number definitions (b/111199492).
+ errno = ENOSYS;
+ return -1;
+}
+
+#endif // __NR_membarrier
+
+} // namespace art
diff --git a/libartbase/base/membarrier.h b/libartbase/base/membarrier.h
new file mode 100644
index 0000000..f829fc1
--- /dev/null
+++ b/libartbase/base/membarrier.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MEMBARRIER_H_
+#define ART_LIBARTBASE_BASE_MEMBARRIER_H_
+
+namespace art {
+ // Command types for the Linux membarrier system call. Different Linux installations may include
+ // different subsets of these commands (at the same codepoints).
+ //
+ // Hardcoding these values is temporary until bionic and the prebuilt glibc have an up-to-date
+ // linux/membarrier.h. The order and values follow the current Linux definitions.
+ enum class MembarrierCommand : int {
+ // MEMBARRIER_CMD_QUERY
+ kQuery = 0,
+ // MEMBARRIER_CMD_GLOBAL
+ kGlobal = (1 << 0),
+ // MEMBARRIER_CMD_GLOBAL_EXPEDITED
+ kGlobalExpedited = (1 << 1),
+ // MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED
+ kRegisterGlobalExpedited = (1 << 2),
+ // MEMBARRIER_CMD_PRIVATE_EXPEDITED
+ kPrivateExpedited = (1 << 3),
+ // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED
+ kRegisterPrivateExpedited = (1 << 4),
+ // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
+ kPrivateExpeditedSyncCore = (1 << 5),
+ // MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
+ kRegisterPrivateExpeditedSyncCore = (1 << 6)
+ };
+
+ // Call membarrier(2) if available on the platform and return the result. This method can fail
+ // if the command is not supported by the kernel. The underlying system call is Linux-specific.
+ int membarrier(MembarrierCommand command);
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_MEMBARRIER_H_
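For orientation, a minimal usage sketch of the wrapper declared above; the function name and control flow are illustrative, only art::membarrier and art::MembarrierCommand come from this change.

#include "membarrier.h"

// Query support, register, then issue a private expedited barrier.
// Returns false when membarrier is unavailable (errno == ENOSYS) or when
// the reported command set lacks the needed bits.
bool IssuePrivateExpeditedBarrier() {
  int cmds = art::membarrier(art::MembarrierCommand::kQuery);
  if (cmds < 0) {
    return false;
  }
  int needed = static_cast<int>(art::MembarrierCommand::kPrivateExpedited) |
               static_cast<int>(art::MembarrierCommand::kRegisterPrivateExpedited);
  if ((cmds & needed) != needed) {
    return false;
  }
  // Registration must precede use of the private expedited barrier,
  // otherwise the barrier command fails with EPERM (see the test below).
  if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpedited) != 0) {
    return false;
  }
  return art::membarrier(art::MembarrierCommand::kPrivateExpedited) == 0;
}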
diff --git a/libartbase/base/membarrier_test.cc b/libartbase/base/membarrier_test.cc
new file mode 100644
index 0000000..3eedf14
--- /dev/null
+++ b/libartbase/base/membarrier_test.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+
+#include <gtest/gtest.h>
+
+#include "membarrier.h"
+
+class ScopedErrnoCleaner {
+ public:
+ ScopedErrnoCleaner() { errno = 0; }
+ ~ScopedErrnoCleaner() { errno = 0; }
+};
+
+bool HasMembarrier(art::MembarrierCommand cmd) {
+ ScopedErrnoCleaner errno_cleaner;
+ int supported_cmds = art::membarrier(art::MembarrierCommand::kQuery);
+ return (supported_cmds > 0) && ((supported_cmds & static_cast<int>(cmd)) != 0);
+}
+
+TEST(membarrier, query) {
+ ScopedErrnoCleaner errno_cleaner;
+ int supported = art::membarrier(art::MembarrierCommand::kQuery);
+ if (errno == 0) {
+ ASSERT_LE(0, supported);
+ } else {
+ ASSERT_TRUE(errno == ENOSYS && supported == -1);
+ }
+}
+
+TEST(membarrier, global_barrier) {
+ if (!HasMembarrier(art::MembarrierCommand::kGlobal)) {
+ GTEST_LOG_(INFO) << "MembarrierCommand::kGlobal not supported, skipping test.";
+ return;
+ }
+ ASSERT_EQ(0, art::membarrier(art::MembarrierCommand::kGlobal));
+}
+
+static const char* MembarrierCommandToName(art::MembarrierCommand cmd) {
+#define CASE_VALUE(x) case (x): return #x;
+ switch (cmd) {
+ CASE_VALUE(art::MembarrierCommand::kQuery);
+ CASE_VALUE(art::MembarrierCommand::kGlobal);
+ CASE_VALUE(art::MembarrierCommand::kGlobalExpedited);
+ CASE_VALUE(art::MembarrierCommand::kRegisterGlobalExpedited);
+ CASE_VALUE(art::MembarrierCommand::kPrivateExpedited);
+ CASE_VALUE(art::MembarrierCommand::kRegisterPrivateExpedited);
+ CASE_VALUE(art::MembarrierCommand::kPrivateExpeditedSyncCore);
+ CASE_VALUE(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore);
+ }
+}
+
+static void TestRegisterAndBarrierCommands(art::MembarrierCommand membarrier_cmd_register,
+ art::MembarrierCommand membarrier_cmd_barrier) {
+ if (!HasMembarrier(membarrier_cmd_register)) {
+ GTEST_LOG_(INFO) << MembarrierCommandToName(membarrier_cmd_register)
+ << " not supported, skipping test.";
+ return;
+ }
+ if (!HasMembarrier(membarrier_cmd_barrier)) {
+ GTEST_LOG_(INFO) << MembarrierCommandToName(membarrier_cmd_barrier)
+ << " not supported, skipping test.";
+ return;
+ }
+
+ ScopedErrnoCleaner errno_cleaner;
+
+ // Check barrier use without prior registration.
+ if (membarrier_cmd_register == art::MembarrierCommand::kRegisterGlobalExpedited) {
+ // Global barrier use is always okay.
+ ASSERT_EQ(0, art::membarrier(membarrier_cmd_barrier));
+ } else {
+ // Private barrier should fail.
+ ASSERT_EQ(-1, art::membarrier(membarrier_cmd_barrier));
+ ASSERT_EQ(EPERM, errno);
+ errno = 0;
+ }
+
+ // Check registration for barrier succeeds.
+ ASSERT_EQ(0, art::membarrier(membarrier_cmd_register));
+
+ // Check barrier use after registration succeeds.
+ ASSERT_EQ(0, art::membarrier(membarrier_cmd_barrier));
+}
+
+TEST(membarrier, global_expedited) {
+ TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterGlobalExpedited,
+ art::MembarrierCommand::kGlobalExpedited);
+}
+
+TEST(membarrier, private_expedited) {
+ TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterPrivateExpedited,
+ art::MembarrierCommand::kPrivateExpedited);
+}
+
+TEST(membarrier, private_expedited_sync_core) {
+ TestRegisterAndBarrierCommands(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore,
+ art::MembarrierCommand::kPrivateExpeditedSyncCore);
+}
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
new file mode 100644
index 0000000..7c20401
--- /dev/null
+++ b/libartbase/base/memfd.cc
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "memfd.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#include "macros.h"
+
+// When building for the Linux host, the prebuilt glibc does not include the memfd_create system
+// call number. As a temporary testing measure, we add the definition here.
+#if defined(__linux__) && !defined(__NR_memfd_create)
+#if defined(__x86_64__)
+#define __NR_memfd_create 319
+#elif defined(__i386__)
+#define __NR_memfd_create 356
+#endif // defined(__i386__)
+#endif // defined(__linux__) && !defined(__NR_memfd_create)
+
+namespace art {
+
+#if defined(__NR_memfd_create)
+
+int memfd_create(const char* name, unsigned int flags) {
+ // Check that the kernel version supports memfd_create(). Some older kernels segfault executing
+ // memfd_create() rather than returning ENOSYS (b/116769556).
+ static constexpr int kRequiredMajor = 3;
+ static constexpr int kRequiredMinor = 17;
+ struct utsname uts;
+ int major, minor;
+ if (uname(&uts) != 0 ||
+ strcmp(uts.sysname, "Linux") != 0 ||
+ sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
+ (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
+ errno = ENOSYS;
+ return -1;
+ }
+
+ return syscall(__NR_memfd_create, name, flags);
+}
+
+#else // __NR_memfd_create
+
+int memfd_create(const char* name ATTRIBUTE_UNUSED, unsigned int flags ATTRIBUTE_UNUSED) {
+ errno = ENOSYS;
+ return -1;
+}
+
+#endif // __NR_memfd_create
+
+} // namespace art
diff --git a/libartbase/base/memfd.h b/libartbase/base/memfd.h
new file mode 100644
index 0000000..91db0b2
--- /dev/null
+++ b/libartbase/base/memfd.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MEMFD_H_
+#define ART_LIBARTBASE_BASE_MEMFD_H_
+
+namespace art {
+
+// Call memfd_create(2) if available on the platform and return the result. This call also makes
+// a kernel version check for safety on older kernels (b/116769556).
+int memfd_create(const char* name, unsigned int flags);
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_MEMFD_H_
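A sketch of how art::memfd_create might be exercised; the helper name is illustrative and the ftruncate/close calls are plain POSIX, matching the ENOSYS contract documented above.

#include <cstddef>
#include <sys/types.h>
#include <unistd.h>

#include "memfd.h"

// Create an anonymous in-memory file of the requested size, or return -1
// (errno is ENOSYS on kernels/hosts without memfd_create support).
int CreateScratchMemfd(size_t size) {
  int fd = art::memfd_create("art-scratch", /* flags= */ 0);
  if (fd < 0) {
    return -1;
  }
  if (ftruncate(fd, static_cast<off_t>(size)) != 0) {
    close(fd);
    return -1;
  }
  return fd;
}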
diff --git a/libartbase/base/memfd_test.cc b/libartbase/base/memfd_test.cc
new file mode 100644
index 0000000..1edf3a1
--- /dev/null
+++ b/libartbase/base/memfd_test.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <unistd.h>
+
+#include <gtest/gtest.h>
+
+#include "memfd.h"
+
+TEST(memfd, basic) {
+ errno = 0;
+ int fd = art::memfd_create("memfd_create_test", 0);
+ if (fd < 0) {
+ ASSERT_EQ(ENOSYS, errno);
+ GTEST_LOG_(INFO) << "memfd_create not supported, skipping test.";
+ return;
+ }
+ ASSERT_TRUE(close(fd) == 0 || errno != EBADF);
+}
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index d381f01..1a6a9bb 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -44,7 +44,7 @@
extern "C" void __asan_handle_no_return();
-# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address, noinline))
# define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
constexpr bool kRunningOnMemoryTool = true;
constexpr bool kMemoryToolDetectsLeaks = true;
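A note on the noinline addition above, offered as a plausible reading rather than something stated in the change: no_sanitize_address suppresses instrumentation only for the annotated function's own body, so keeping that body out of line avoids it being folded into an instrumented caller. The helper below is hypothetical and only illustrates how the macro is applied.

#include <cstddef>
#include <cstdint>

#include "base/memory_tool.h"

// Hypothetical helper that deliberately reads memory ASan may consider
// poisoned (e.g. arena redzones). The attribute now also forces it to stay
// out of line.
ATTRIBUTE_NO_SANITIZE_ADDRESS
uint8_t ReadPossiblyPoisonedByte(const uint8_t* p, size_t offset) {
  return p[offset];
}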
diff --git a/libartbase/base/scoped_arena_allocator.cc b/libartbase/base/scoped_arena_allocator.cc
index ab05c60..a54f350 100644
--- a/libartbase/base/scoped_arena_allocator.cc
+++ b/libartbase/base/scoped_arena_allocator.cc
@@ -106,7 +106,7 @@
return ptr;
}
-ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other)
+ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept
: DebugStackReference(std::move(other)),
DebugStackRefCounter(),
ArenaAllocatorStats(other),
diff --git a/libartbase/base/scoped_arena_allocator.h b/libartbase/base/scoped_arena_allocator.h
index 7eaec5e..52d0361 100644
--- a/libartbase/base/scoped_arena_allocator.h
+++ b/libartbase/base/scoped_arena_allocator.h
@@ -138,7 +138,7 @@
class ScopedArenaAllocator
: private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
public:
- ScopedArenaAllocator(ScopedArenaAllocator&& other);
+ ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept;
explicit ScopedArenaAllocator(ArenaStack* arena_stack);
~ScopedArenaAllocator();
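A short aside on why the noexcept annotation matters, as general C++ behavior rather than anything specific to this change: standard containers relocate elements with std::move_if_noexcept, so a potentially-throwing move constructor can force copies during growth. The element type below is purely illustrative.

#include <vector>

// Because the move operations are noexcept, std::vector relocates Widget
// objects by moving rather than copying when it reallocates.
struct Widget {
  Widget() = default;
  Widget(const Widget&) = default;
  Widget& operator=(const Widget&) = default;
  Widget(Widget&&) noexcept = default;
  Widget& operator=(Widget&&) noexcept = default;
  std::vector<int> payload;
};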
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index d679328..beee501 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -40,7 +40,7 @@
// to acquire a lock, and the unlock / close in the corresponding
// destructor. Callers should explicitly flush files they're writing to if
// that is the desired behaviour.
- std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, false /* check_usage */));
+ std::unique_ptr<File> file(OS::OpenFileWithFlags(filename, flags, /* auto_flush= */ false));
if (file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open file '%s': %s", filename, strerror(errno));
return nullptr;
@@ -98,7 +98,7 @@
// destructor. Callers should explicitly flush files they're writing to if
// that is the desired behaviour.
ScopedFlock locked_file(
- new LockedFile(dup(fd), path, false /* check_usage */, read_only_mode));
+ new LockedFile(dup(fd), path, /* check_usage= */ false, read_only_mode));
if (locked_file->Fd() == -1) {
*error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
locked_file->GetPath().c_str(), strerror(errno));
diff --git a/libartbase/base/scoped_flock_test.cc b/libartbase/base/scoped_flock_test.cc
index f9ac1e0..22356cd 100644
--- a/libartbase/base/scoped_flock_test.cc
+++ b/libartbase/base/scoped_flock_test.cc
@@ -38,7 +38,7 @@
// Attempt to acquire a second lock on the same file. This must fail.
ScopedFlock second_lock = LockedFile::Open(scratch_file.GetFilename().c_str(),
O_RDONLY,
- /* block */ false,
+ /* block= */ false,
&error_msg);
ASSERT_TRUE(second_lock.get() == nullptr);
ASSERT_TRUE(!error_msg.empty());
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index c5313e9..de60277 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -21,6 +21,10 @@
#include <sys/types.h>
#include <unistd.h>
+#if defined(__BIONIC__)
+#include <android/fdsan.h>
+#endif
+
#include <limits>
#include <android-base/logging.h>
@@ -36,26 +40,34 @@
namespace unix_file {
-FdFile::FdFile()
- : guard_state_(GuardState::kClosed), fd_(-1), auto_close_(true), read_only_mode_(false) {
+#if defined(__BIONIC__)
+static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
+ return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
+ reinterpret_cast<uint64_t>(fd_file));
}
+#endif
FdFile::FdFile(int fd, bool check_usage)
- : guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
- fd_(fd), auto_close_(true), read_only_mode_(false) {
-}
+ : FdFile(fd, std::string(), check_usage) {}
FdFile::FdFile(int fd, const std::string& path, bool check_usage)
- : FdFile(fd, path, check_usage, false) {
-}
+ : FdFile(fd, path, check_usage, false) {}
-FdFile::FdFile(int fd, const std::string& path, bool check_usage, bool read_only_mode)
+FdFile::FdFile(int fd, const std::string& path, bool check_usage,
+ bool read_only_mode)
: guard_state_(check_usage ? GuardState::kBase : GuardState::kNoCheck),
- fd_(fd), file_path_(path), auto_close_(true), read_only_mode_(read_only_mode) {
+ fd_(fd),
+ file_path_(path),
+ read_only_mode_(read_only_mode) {
+#if defined(__BIONIC__)
+ if (fd >= 0) {
+ android_fdsan_exchange_owner_tag(fd, 0, GetFdFileOwnerTag(this));
+ }
+#endif
}
-FdFile::FdFile(const std::string& path, int flags, mode_t mode, bool check_usage)
- : fd_(-1), auto_close_(true) {
+FdFile::FdFile(const std::string& path, int flags, mode_t mode,
+ bool check_usage) {
Open(path, flags, mode);
if (!check_usage || !IsOpened()) {
guard_state_ = GuardState::kNoCheck;
@@ -72,14 +84,28 @@
}
DCHECK_GE(guard_state_, GuardState::kClosed);
}
- if (auto_close_ && fd_ != -1) {
+ if (fd_ != -1) {
if (Close() != 0) {
PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
}
}
}
-FdFile& FdFile::operator=(FdFile&& other) {
+FdFile::FdFile(FdFile&& other) noexcept
+ : guard_state_(other.guard_state_),
+ fd_(other.fd_),
+ file_path_(std::move(other.file_path_)),
+ read_only_mode_(other.read_only_mode_) {
+#if defined(__BIONIC__)
+ if (fd_ >= 0) {
+ android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
+ }
+#endif
+ other.guard_state_ = GuardState::kClosed;
+ other.fd_ = -1;
+}
+
+FdFile& FdFile::operator=(FdFile&& other) noexcept {
if (this == &other) {
return *this;
}
@@ -91,10 +117,15 @@
guard_state_ = other.guard_state_;
fd_ = other.fd_;
file_path_ = std::move(other.file_path_);
- auto_close_ = other.auto_close_;
read_only_mode_ = other.read_only_mode_;
- other.Release(); // Release other.
+#if defined(__BIONIC__)
+ if (fd_ >= 0) {
+ android_fdsan_exchange_owner_tag(fd_, GetFdFileOwnerTag(&other), GetFdFileOwnerTag(this));
+ }
+#endif
+ other.guard_state_ = GuardState::kClosed;
+ other.fd_ = -1;
return *this;
}
@@ -102,6 +133,39 @@
Destroy();
}
+int FdFile::Release() {
+ int tmp_fd = fd_;
+ fd_ = -1;
+ guard_state_ = GuardState::kNoCheck;
+#if defined(__BIONIC__)
+ if (tmp_fd >= 0) {
+ android_fdsan_exchange_owner_tag(tmp_fd, GetFdFileOwnerTag(this), 0);
+ }
+#endif
+ return tmp_fd;
+}
+
+void FdFile::Reset(int fd, bool check_usage) {
+ CHECK_NE(fd, fd_);
+
+ if (fd_ != -1) {
+ Destroy();
+ }
+ fd_ = fd;
+
+#if defined(__BIONIC__)
+ if (fd_ >= 0) {
+ android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
+ }
+#endif
+
+ if (check_usage) {
+ guard_state_ = fd == -1 ? GuardState::kNoCheck : GuardState::kBase;
+ } else {
+ guard_state_ = GuardState::kNoCheck;
+ }
+}
+
void FdFile::moveTo(GuardState target, GuardState warn_threshold, const char* warning) {
if (kCheckSafeUsage) {
if (guard_state_ < GuardState::kNoCheck) {
@@ -125,10 +189,6 @@
}
}
-void FdFile::DisableAutoClose() {
- auto_close_ = false;
-}
-
bool FdFile::Open(const std::string& path, int flags) {
return Open(path, flags, 0640);
}
@@ -141,6 +201,11 @@
if (fd_ == -1) {
return false;
}
+
+#if defined(__BIONIC__)
+ android_fdsan_exchange_owner_tag(fd_, 0, GetFdFileOwnerTag(this));
+#endif
+
file_path_ = path;
if (kCheckSafeUsage && (flags & (O_RDWR | O_CREAT | O_WRONLY)) != 0) {
// Start in the base state (not flushed, not closed).
@@ -154,7 +219,11 @@
}
int FdFile::Close() {
+#if defined(__BIONIC__)
+ int result = android_fdsan_close_with_tag(fd_, GetFdFileOwnerTag(this));
+#else
int result = close(fd_);
+#endif
// Test here, so the file is closed and not leaked.
if (kCheckSafeUsage) {
diff --git a/libartbase/base/unix_file/fd_file.h b/libartbase/base/unix_file/fd_file.h
index 19be3ef..f5aa2a5 100644
--- a/libartbase/base/unix_file/fd_file.h
+++ b/libartbase/base/unix_file/fd_file.h
@@ -34,52 +34,28 @@
// Not thread safe.
class FdFile : public RandomAccessFile {
public:
- FdFile();
- // Creates an FdFile using the given file descriptor. Takes ownership of the
- // file descriptor. (Use DisableAutoClose to retain ownership.)
- FdFile(int fd, bool checkUsage);
- FdFile(int fd, const std::string& path, bool checkUsage);
- FdFile(int fd, const std::string& path, bool checkUsage, bool read_only_mode);
+ FdFile() = default;
+ // Creates an FdFile using the given file descriptor.
+ // Takes ownership of the file descriptor.
+ FdFile(int fd, bool check_usage);
+ FdFile(int fd, const std::string& path, bool check_usage);
+ FdFile(int fd, const std::string& path, bool check_usage, bool read_only_mode);
- FdFile(const std::string& path, int flags, bool checkUsage)
- : FdFile(path, flags, 0640, checkUsage) {}
- FdFile(const std::string& path, int flags, mode_t mode, bool checkUsage);
+ FdFile(const std::string& path, int flags, bool check_usage)
+ : FdFile(path, flags, 0640, check_usage) {}
+ FdFile(const std::string& path, int flags, mode_t mode, bool check_usage);
// Move constructor.
- FdFile(FdFile&& other)
- : guard_state_(other.guard_state_),
- fd_(other.fd_),
- file_path_(std::move(other.file_path_)),
- auto_close_(other.auto_close_),
- read_only_mode_(other.read_only_mode_) {
- other.Release(); // Release the src.
- }
+ FdFile(FdFile&& other) noexcept;
// Move assignment operator.
- FdFile& operator=(FdFile&& other);
+ FdFile& operator=(FdFile&& other) noexcept;
// Release the file descriptor. This will make further accesses to this FdFile invalid. Disables
// all further state checking.
- int Release() {
- int tmp_fd = fd_;
- fd_ = -1;
- guard_state_ = GuardState::kNoCheck;
- auto_close_ = false;
- return tmp_fd;
- }
+ int Release();
- void Reset(int fd, bool check_usage) {
- if (fd_ != -1 && fd_ != fd) {
- Destroy();
- }
- fd_ = fd;
- if (check_usage) {
- guard_state_ = fd == -1 ? GuardState::kNoCheck : GuardState::kBase;
- } else {
- guard_state_ = GuardState::kNoCheck;
- }
- // Keep the auto_close_ state.
- }
+ void Reset(int fd, bool check_usage);
// Destroys an FdFile, closing the file descriptor if Close hasn't already
// been called. (If you care about the return value of Close, call it
@@ -121,7 +97,6 @@
const std::string& GetPath() const {
return file_path_;
}
- void DisableAutoClose();
bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED;
bool PreadFully(void* buffer, size_t byte_count, size_t offset) WARN_UNUSED;
bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED;
@@ -168,7 +143,7 @@
}
}
- GuardState guard_state_;
+ GuardState guard_state_ = GuardState::kClosed;
// Opens file 'file_path' using 'flags' and 'mode'.
bool Open(const std::string& file_path, int flags);
@@ -180,10 +155,9 @@
void Destroy(); // For ~FdFile and operator=(&&).
- int fd_;
+ int fd_ = -1;
std::string file_path_;
- bool auto_close_;
- bool read_only_mode_;
+ bool read_only_mode_ = false;
DISALLOW_COPY_AND_ASSIGN(FdFile);
};
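A sketch of the ownership contract after these changes (the function, path handling, and include form are illustrative): an FdFile now always owns and closes its descriptor unless Release() is called, and on bionic the descriptor carries an fdsan owner tag so stray closes elsewhere are reported.

#include <fcntl.h>
#include <unistd.h>

#include <string>

#include "base/unix_file/fd_file.h"

void OwnershipSketch(const std::string& path) {
  unix_file::FdFile file(path, O_RDONLY, /* check_usage= */ false);
  if (!file.IsOpened()) {
    return;
  }
  // Transfer ownership out of the FdFile: the fdsan tag is dropped and the
  // destructor will no longer close the descriptor.
  int raw_fd = file.Release();
  // ... hand raw_fd to code that manages its own lifetime ...
  close(raw_fd);
}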
diff --git a/libartbase/base/unix_file/fd_file_test.cc b/libartbase/base/unix_file/fd_file_test.cc
index 1f731a7..9c39bb5 100644
--- a/libartbase/base/unix_file/fd_file_test.cc
+++ b/libartbase/base/unix_file/fd_file_test.cc
@@ -23,8 +23,11 @@
class FdFileTest : public RandomAccessFileTest {
protected:
- virtual RandomAccessFile* MakeTestFile() {
- return new FdFile(fileno(tmpfile()), false);
+ RandomAccessFile* MakeTestFile() override {
+ FILE* tmp = tmpfile();
+ int fd = dup(fileno(tmp));
+ fclose(tmp);
+ return new FdFile(fd, false);
}
};
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 761c611..0f172fd 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -24,6 +24,7 @@
#include <sys/wait.h>
#include <unistd.h>
+#include <fstream>
#include <memory>
#include "android-base/file.h"
@@ -38,6 +39,12 @@
#include "AvailabilityMacros.h" // For MAC_OS_X_VERSION_MAX_ALLOWED
#endif
+#if defined(__BIONIC__)
+// membarrier(2) is only supported for target builds (b/111199492).
+#include <linux/membarrier.h>
+#include <sys/syscall.h>
+#endif
+
#if defined(__linux__)
#include <linux/unistd.h>
#endif
@@ -207,4 +214,25 @@
}
}
+std::string GetProcessStatus(const char* key) {
+ // Build search pattern of key and separator.
+ std::string pattern(key);
+ pattern.push_back(':');
+
+ // Search for status lines starting with pattern.
+ std::ifstream fs("/proc/self/status");
+ std::string line;
+ while (std::getline(fs, line)) {
+ if (strncmp(pattern.c_str(), line.c_str(), pattern.size()) == 0) {
+ // Skip whitespace in matching line (if any).
+ size_t pos = line.find_first_not_of(" \t", pattern.size());
+ if (UNLIKELY(pos == std::string::npos)) {
+ break;
+ }
+ return std::string(line, pos);
+ }
+ }
+ return "<unknown>";
+}
+
} // namespace art
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index ba61e1b..9c71055 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -24,6 +24,7 @@
#include <string>
#include <android-base/logging.h>
+#include <android-base/parseint.h>
#include "casts.h"
#include "enums.h"
@@ -33,34 +34,6 @@
namespace art {
-template <typename T>
-bool ParseUint(const char *in, T* out) {
- char* end;
- unsigned long long int result = strtoull(in, &end, 0); // NOLINT(runtime/int)
- if (in == end || *end != '\0') {
- return false;
- }
- if (std::numeric_limits<T>::max() < result) {
- return false;
- }
- *out = static_cast<T>(result);
- return true;
-}
-
-template <typename T>
-bool ParseInt(const char* in, T* out) {
- char* end;
- long long int result = strtoll(in, &end, 0); // NOLINT(runtime/int)
- if (in == end || *end != '\0') {
- return false;
- }
- if (result < std::numeric_limits<T>::min() || std::numeric_limits<T>::max() < result) {
- return false;
- }
- *out = static_cast<T>(result);
- return true;
-}
-
static inline uint32_t PointerToLowMemUInt32(const void* p) {
uintptr_t intp = reinterpret_cast<uintptr_t>(p);
DCHECK_LE(intp, 0xFFFFFFFFU);
@@ -130,7 +103,7 @@
DCHECK(option.starts_with(option_prefix)) << option << " " << option_prefix;
const char* value_string = option.substr(option_prefix.size()).data();
int64_t parsed_integer_value = 0;
- if (!ParseInt(value_string, &parsed_integer_value)) {
+ if (!android::base::ParseInt(value_string, &parsed_integer_value)) {
usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
}
*out = dchecked_integral_cast<T>(parsed_integer_value);
@@ -179,14 +152,14 @@
// Sleep forever and never come back.
NO_RETURN void SleepForever();
-inline void FlushInstructionCache(char* begin, char* end) {
- __builtin___clear_cache(begin, end);
+inline void FlushDataCache(void* begin, void* end) {
+ __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
}
-inline void FlushDataCache(char* begin, char* end) {
+inline void FlushInstructionCache(void* begin, void* end) {
// Same as FlushInstructionCache for lack of other builtin. __builtin___clear_cache
// flushes both caches.
- __builtin___clear_cache(begin, end);
+ __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end));
}
template <typename T>
@@ -243,6 +216,11 @@
}
}
+// Looks up the value for a given key in /proc/self/status. Keys and values are separated by a ':'
+// in the status file. Returns the value found on success and "<unknown>" if the key is not found
+// or there is an I/O error.
+std::string GetProcessStatus(const char* key);
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_UTILS_H_
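A small usage sketch for the new helper (the key choice and surrounding function are illustrative); per the comment above, "<unknown>" is the sentinel for a missing key or an I/O error.

#include <string>

#include "base/utils.h"

void ReportPeakMemory() {
  // "VmPeak" is one of the standard /proc/self/status keys; any key works.
  std::string vm_peak = art::GetProcessStatus("VmPeak");
  if (vm_peak != "<unknown>") {
    // ... log or record vm_peak, e.g. "123456 kB" ...
  }
}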
diff --git a/libartbase/base/utils_test.cc b/libartbase/base/utils_test.cc
index 892d1fd..9bd50c3 100644
--- a/libartbase/base/utils_test.cc
+++ b/libartbase/base/utils_test.cc
@@ -126,4 +126,12 @@
EXPECT_EQ(BoundsCheckedCast<const uint64_t*>(buffer + 57, buffer, buffer_end), nullptr);
}
+TEST_F(UtilsTest, GetProcessStatus) {
+ EXPECT_EQ("utils_test", GetProcessStatus("Name"));
+ EXPECT_EQ("R (running)", GetProcessStatus("State"));
+ EXPECT_EQ("<unknown>", GetProcessStatus("tate"));
+ EXPECT_EQ("<unknown>", GetProcessStatus("e"));
+ EXPECT_EQ("<unknown>", GetProcessStatus("Dummy"));
+}
+
} // namespace art
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 174d227..f5761cf 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -75,10 +75,10 @@
name += " extracted in memory from ";
name += zip_filename;
MemMap map = MemMap::MapAnonymous(name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
GetUncompressedLength(),
PROT_READ | PROT_WRITE,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
@@ -138,7 +138,7 @@
MAP_PRIVATE,
zip_fd,
offset,
- /* low_4gb */ false,
+ /* low_4gb= */ false,
name.c_str(),
error_msg);
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 8fc8b54..d326a9e 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -30,8 +30,9 @@
#include "unix_file/random_access_file.h"
// system/core/zip_archive definitions.
+struct ZipArchive;
struct ZipEntry;
-typedef void* ZipArchiveHandle;
+typedef ZipArchive* ZipArchiveHandle;
namespace art {
diff --git a/libartbase/base/zip_archive_test.cc b/libartbase/base/zip_archive_test.cc
index b99a471..b923881 100644
--- a/libartbase/base/zip_archive_test.cc
+++ b/libartbase/base/zip_archive_test.cc
@@ -41,7 +41,7 @@
ScratchFile tmp;
ASSERT_NE(-1, tmp.GetFd());
- std::unique_ptr<File> file(new File(tmp.GetFd(), tmp.GetFilename(), false));
+ std::unique_ptr<File> file(new File(dup(tmp.GetFd()), tmp.GetFilename(), false));
ASSERT_TRUE(file.get() != nullptr);
bool success = zip_entry->ExtractToFile(*file, &error_msg);
ASSERT_TRUE(success) << error_msg;
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 06fd19e..49b1278 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -72,6 +72,36 @@
],
}
+cc_defaults {
+ name: "libdexfile_static_base_defaults",
+ static_libs: [
+ "libbase",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libz",
+ "libziparchive",
+ ],
+}
+
+cc_defaults {
+ name: "libdexfile_static_defaults",
+ defaults: [
+ "libartbase_static_defaults",
+ "libdexfile_static_base_defaults",
+ ],
+ static_libs: ["libdexfile"],
+}
+
+cc_defaults {
+ name: "libdexfiled_static_defaults",
+ defaults: [
+ "libartbased_static_defaults",
+ "libdexfile_static_base_defaults",
+ ],
+ static_libs: ["libdexfiled"],
+}
+
gensrcs {
name: "dexfile_operator_srcs",
cmd: "$(location generate_operator_out) art/libdexfile $(in) > $(out)",
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index eb7d3d3..20a519b 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -95,7 +95,7 @@
File fd;
if (zip_fd != -1) {
if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
- fd = File(zip_fd, false /* check_usage */);
+ fd = File(DupCloexec(zip_fd), /* check_usage= */ false);
}
} else {
fd = OpenAndReadMagic(filename, &magic, error_msg);
@@ -142,9 +142,9 @@
if (IsMagicValid(magic)) {
std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
filename,
- /* verify */ false,
- /* verify_checksum */ false,
- /* mmap_shared */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
+ /* mmap_shared= */ false,
error_msg));
if (dex_file == nullptr) {
return false;
@@ -167,16 +167,16 @@
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
return OpenCommon(base,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
location_checksum,
oat_dex_file,
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
@@ -199,8 +199,8 @@
uint8_t* begin = map.Begin();
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
location_checksum,
kNoOatDexFile,
@@ -208,7 +208,7 @@
verify_checksum,
error_msg,
std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
+ /*verify_result=*/ nullptr);
// Opening CompactDex is only supported from vdex files.
if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
*error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
@@ -240,7 +240,7 @@
location,
verify,
verify_checksum,
- /* mmap_shared */ false,
+ /* mmap_shared= */ false,
error_msg));
if (dex_file.get() != nullptr) {
dex_files->push_back(std::move(dex_file));
@@ -290,7 +290,7 @@
CHECK(!location.empty());
MemMap map;
{
- File delayed_close(fd, /* check_usage */ false);
+ File delayed_close(fd, /* check_usage= */ false);
struct stat sbuf;
memset(&sbuf, 0, sizeof(sbuf));
if (fstat(fd, &sbuf) == -1) {
@@ -308,7 +308,7 @@
mmap_shared ? MAP_SHARED : MAP_PRIVATE,
fd,
0,
- /*low_4gb*/false,
+ /*low_4gb=*/false,
location.c_str(),
error_msg);
if (!map.IsValid()) {
@@ -330,8 +330,8 @@
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
dex_header->checksum_,
kNoOatDexFile,
@@ -339,7 +339,7 @@
verify_checksum,
error_msg,
std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
+ /*verify_result=*/ nullptr);
// Opening CompactDex is only supported from vdex files.
if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
@@ -407,8 +407,8 @@
size_t size = map.Size();
std::unique_ptr<DexFile> dex_file = OpenCommon(begin,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
zip_entry->GetCrc32(),
kNoOatDexFile,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index a7d0363..f7a2062 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -217,9 +217,9 @@
std::string plain_method = std::string("GetMethodSignature.") + r.name;
ASSERT_EQ(plain_method,
- raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ false));
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ false));
ASSERT_EQ(r.pretty_method,
- raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ true));
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature= */ true));
}
}
@@ -332,8 +332,8 @@
std::string error_msg;
bool success = loader.Open(data_location_path.c_str(),
data_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -360,8 +360,8 @@
std::string error_msg;
bool success = loader.Open(system_location_path.c_str(),
system_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -388,8 +388,8 @@
std::string error_msg;
bool success = loader.Open(system_framework_location_path.c_str(),
system_framework_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -416,8 +416,8 @@
std::string error_msg;
bool success = loader.Open(data_multi_location_path.c_str(),
data_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -445,8 +445,8 @@
std::string error_msg;
bool success = loader.Open(system_multi_location_path.c_str(),
system_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
@@ -474,8 +474,8 @@
std::string error_msg;
bool success = loader.Open(system_framework_multi_location_path.c_str(),
system_framework_multi_location_path,
- /* verify */ false,
- /* verify_checksum */ false,
+ /* verify= */ false,
+ /* verify_checksum= */ false,
&error_msg,
&dex_files);
ASSERT_TRUE(success) << error_msg;
diff --git a/libdexfile/dex/code_item_accessors-inl.h b/libdexfile/dex/code_item_accessors-inl.h
index c166f5f..bbf2224 100644
--- a/libdexfile/dex/code_item_accessors-inl.h
+++ b/libdexfile/dex/code_item_accessors-inl.h
@@ -184,19 +184,48 @@
CodeItemDataAccessor::Init(code_item);
}
-template<typename NewLocalCallback>
-inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(bool is_static,
- uint32_t method_idx,
- NewLocalCallback new_local,
- void* context) const {
+template<typename NewLocalVisitor>
+inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(
+ bool is_static,
+ uint32_t method_idx,
+ const NewLocalVisitor& new_local) const {
return dex_file_->DecodeDebugLocalInfo(RegistersSize(),
InsSize(),
InsnsSizeInCodeUnits(),
DebugInfoOffset(),
is_static,
method_idx,
- new_local,
- context);
+ new_local);
+}
+
+template <typename Visitor>
+inline uint32_t CodeItemDebugInfoAccessor::VisitParameterNames(const Visitor& visitor) const {
+ const uint8_t* stream = dex_file_->GetDebugInfoStream(DebugInfoOffset());
+ return (stream != nullptr) ? DexFile::DecodeDebugInfoParameterNames(&stream, visitor) : 0u;
+}
+
+inline bool CodeItemDebugInfoAccessor::GetLineNumForPc(const uint32_t address,
+ uint32_t* line_num) const {
+ return DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ // We know that this callback will be called in ascending address order, so keep going until we
+ // find a match or we've just gone past it.
+ if (entry.address_ > address) {
+ // The line number from the previous positions callback will be the final result.
+ return true;
+ }
+ *line_num = entry.line_;
+ return entry.address_ == address;
+ });
+}
+
+template <typename Visitor>
+inline bool CodeItemDebugInfoAccessor::DecodeDebugPositionInfo(const Visitor& visitor) const {
+ return dex_file_->DecodeDebugPositionInfo(
+ dex_file_->GetDebugInfoStream(DebugInfoOffset()),
+ [this](uint32_t idx) {
+ return dex_file_->StringDataByIdx(dex::StringIndex(idx));
+ },
+ visitor);
}
} // namespace art
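To make the callback-to-visitor migration above concrete, a sketch of a caller using lambdas; the surrounding function, the dex_pc/method_idx values, and the include spellings are placeholders.

#include <cstdint>

#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"

void SketchDebugInfoUse(const art::CodeItemDebugInfoAccessor& accessor, uint32_t dex_pc) {
  // Line numbers: the LineNumFromPcContext struct and LineNumForPcCb callback
  // are gone; GetLineNumForPc wraps DecodeDebugPositionInfo with a lambda.
  uint32_t line = 0u;
  if (accessor.GetLineNumForPc(dex_pc, &line)) {
    // ... use line ...
  }

  // Local variables: pass any callable taking a DexFile::LocalInfo.
  accessor.DecodeDebugLocalInfo(/* is_static= */ true,
                                /* method_idx= */ 0u,
                                [](const art::DexFile::LocalInfo& local) {
                                  // e.g. inspect local.name_ / local.end_address_.
                                  (void)local;
                                });
}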
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 695cc7b..c307c9f 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -151,11 +151,20 @@
return debug_info_offset_;
}
- template<typename NewLocalCallback>
+ template<typename NewLocalVisitor>
bool DecodeDebugLocalInfo(bool is_static,
uint32_t method_idx,
- NewLocalCallback new_local,
- void* context) const;
+ const NewLocalVisitor& new_local) const;
+
+ // Visits each parameter in the debug information and returns the line number.
+ // The visitor is invoked with each parameter name's dex::StringIndex.
+ template <typename Visitor>
+ uint32_t VisitParameterNames(const Visitor& visitor) const;
+
+ template <typename Visitor>
+ bool DecodeDebugPositionInfo(const Visitor& visitor) const;
+
+ bool GetLineNumForPc(const uint32_t pc, uint32_t* line_num) const;
protected:
ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item, uint32_t dex_method_index);
diff --git a/libdexfile/dex/code_item_accessors_test.cc b/libdexfile/dex/code_item_accessors_test.cc
index 2bb4dde..87f4bab 100644
--- a/libdexfile/dex/code_item_accessors_test.cc
+++ b/libdexfile/dex/code_item_accessors_test.cc
@@ -45,10 +45,10 @@
std::unique_ptr<const DexFile> dex(dex_file_loader.Open(data->data(),
data->size(),
"location",
- /*location_checksum*/ 123,
- /*oat_dex_file*/nullptr,
- /*verify*/false,
- /*verify_checksum*/false,
+ /*location_checksum=*/ 123,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
&error_msg));
CHECK(dex != nullptr) << error_msg;
return dex;
@@ -56,11 +56,11 @@
TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
std::vector<uint8_t> standard_dex_data;
- std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex*/false,
+ std::unique_ptr<const DexFile> standard_dex(CreateFakeDex(/*compact_dex=*/false,
&standard_dex_data));
ASSERT_TRUE(standard_dex != nullptr);
std::vector<uint8_t> compact_dex_data;
- std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true,
+ std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex=*/true,
&compact_dex_data));
ASSERT_TRUE(compact_dex != nullptr);
static constexpr uint16_t kRegisterSize = 2;
diff --git a/libdexfile/dex/compact_dex_file.cc b/libdexfile/dex/compact_dex_file.cc
index 302b59e..641c523 100644
--- a/libdexfile/dex/compact_dex_file.cc
+++ b/libdexfile/dex/compact_dex_file.cc
@@ -100,7 +100,7 @@
location_checksum,
oat_dex_file,
std::move(container),
- /*is_compact_dex*/ true),
+ /*is_compact_dex=*/ true),
debug_info_offsets_(DataBegin() + GetHeader().debug_info_offsets_pos_,
GetHeader().debug_info_base_,
GetHeader().debug_info_offsets_table_offset_) {}
diff --git a/libdexfile/dex/compact_dex_file_test.cc b/libdexfile/dex/compact_dex_file_test.cc
index 517c587..799967e 100644
--- a/libdexfile/dex/compact_dex_file_test.cc
+++ b/libdexfile/dex/compact_dex_file_test.cc
@@ -68,11 +68,11 @@
uint16_t out_outs_size;
uint16_t out_tries_size;
uint32_t out_insns_size_in_code_units;
- code_item->DecodeFields</*kDecodeOnlyInstructionCount*/false>(&out_insns_size_in_code_units,
- &out_registers_size,
- &out_ins_size,
- &out_outs_size,
- &out_tries_size);
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/false>(&out_insns_size_in_code_units,
+ &out_registers_size,
+ &out_ins_size,
+ &out_outs_size,
+ &out_tries_size);
ASSERT_EQ(registers_size, out_registers_size);
ASSERT_EQ(ins_size, out_ins_size);
ASSERT_EQ(outs_size, out_outs_size);
@@ -80,11 +80,11 @@
ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
++out_insns_size_in_code_units; // Force value to change.
- code_item->DecodeFields</*kDecodeOnlyInstructionCount*/true>(&out_insns_size_in_code_units,
- /*registers_size*/ nullptr,
- /*ins_size*/ nullptr,
- /*outs_size*/ nullptr,
- /*tries_size*/ nullptr);
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount=*/true>(&out_insns_size_in_code_units,
+ /*registers_size=*/ nullptr,
+ /*ins_size=*/ nullptr,
+ /*outs_size=*/ nullptr,
+ /*tries_size=*/ nullptr);
ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
};
static constexpr uint32_t kMax32 = std::numeric_limits<uint32_t>::max();
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index c512361..eae7efc 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -22,6 +22,7 @@
#include "base/casts.h"
#include "base/leb128.h"
#include "base/stringpiece.h"
+#include "base/utils.h"
#include "class_iterator.h"
#include "compact_dex_file.h"
#include "dex_instruction_iterator.h"
@@ -214,10 +215,9 @@
uint16_t registers_size,
uint16_t ins_size,
uint16_t insns_size_in_code_units,
- IndexToStringData index_to_string_data,
- TypeIndexToStringData type_index_to_string_data,
- NewLocalCallback new_local_callback,
- void* context) {
+ const IndexToStringData& index_to_string_data,
+ const TypeIndexToStringData& type_index_to_string_data,
+ const NewLocalCallback& new_local_callback) {
if (stream == nullptr) {
return false;
}
@@ -277,7 +277,7 @@
for (uint16_t reg = 0; reg < registers_size; reg++) {
if (local_in_reg[reg].is_live_) {
local_in_reg[reg].end_address_ = insns_size_in_code_units;
- new_local_callback(context, local_in_reg[reg]);
+ new_local_callback(local_in_reg[reg]);
}
}
return true;
@@ -306,7 +306,7 @@
// Emit what was previously there, if anything
if (local_in_reg[reg].is_live_) {
local_in_reg[reg].end_address_ = address;
- new_local_callback(context, local_in_reg[reg]);
+ new_local_callback(local_in_reg[reg]);
}
local_in_reg[reg].name_ = index_to_string_data(name_idx);
@@ -328,7 +328,7 @@
// closed register is sloppy, but harmless if no further action is taken.
if (local_in_reg[reg].is_live_) {
local_in_reg[reg].end_address_ = address;
- new_local_callback(context, local_in_reg[reg]);
+ new_local_callback(local_in_reg[reg]);
local_in_reg[reg].is_live_ = false;
}
break;
@@ -368,8 +368,7 @@
uint32_t debug_info_offset,
bool is_static,
uint32_t method_idx,
- NewLocalCallback new_local_callback,
- void* context) const {
+ const NewLocalCallback& new_local_callback) const {
const uint8_t* const stream = GetDebugInfoStream(debug_info_offset);
if (stream == nullptr) {
return false;
@@ -395,25 +394,19 @@
return StringByTypeIdx(dex::TypeIndex(
dchecked_integral_cast<uint16_t>(idx)));
},
- new_local_callback,
- context);
+ new_local_callback);
}
template<typename DexDebugNewPosition, typename IndexToStringData>
bool DexFile::DecodeDebugPositionInfo(const uint8_t* stream,
- IndexToStringData index_to_string_data,
- DexDebugNewPosition position_functor,
- void* context) {
+ const IndexToStringData& index_to_string_data,
+ const DexDebugNewPosition& position_functor) {
if (stream == nullptr) {
return false;
}
- PositionInfo entry = PositionInfo();
- entry.line_ = DecodeUnsignedLeb128(&stream);
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- DecodeUnsignedLeb128P1(&stream); // Parameter name.
- }
+ PositionInfo entry;
+ entry.line_ = DecodeDebugInfoParameterNames(&stream, VoidFunctor());
for (;;) {
uint8_t opcode = *stream++;
@@ -456,7 +449,7 @@
int adjopcode = opcode - DBG_FIRST_SPECIAL;
entry.address_ += adjopcode / DBG_LINE_RANGE;
entry.line_ += DBG_LINE_BASE + (adjopcode % DBG_LINE_RANGE);
- if (position_functor(context, entry)) {
+ if (position_functor(entry)) {
return true; // early exit.
}
entry.prologue_end_ = false;
@@ -467,18 +460,6 @@
}
}
-template<typename DexDebugNewPosition>
-bool DexFile::DecodeDebugPositionInfo(uint32_t debug_info_offset,
- DexDebugNewPosition position_functor,
- void* context) const {
- return DecodeDebugPositionInfo(GetDebugInfoStream(debug_info_offset),
- [this](uint32_t idx) {
- return StringDataByIdx(dex::StringIndex(idx));
- },
- position_functor,
- context);
-}
-
inline const CompactDexFile* DexFile::AsCompactDexFile() const {
DCHECK(IsCompactDexFile());
return down_cast<const CompactDexFile*>(this);
@@ -502,6 +483,18 @@
return { ClassIterator(*this, 0u), ClassIterator(*this, NumClassDefs()) };
}
+// Returns the line number.
+template <typename Visitor>
+inline uint32_t DexFile::DecodeDebugInfoParameterNames(const uint8_t** debug_info,
+ const Visitor& visitor) {
+ uint32_t line = DecodeUnsignedLeb128(debug_info);
+ const uint32_t parameters_size = DecodeUnsignedLeb128(debug_info);
+ for (uint32_t i = 0; i < parameters_size; ++i) {
+ visitor(dex::StringIndex(DecodeUnsignedLeb128P1(debug_info)));
+ }
+ return line;
+}
+
} // namespace art
#endif // ART_LIBDEXFILE_DEX_DEX_FILE_INL_H_
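A sketch of the parameter-name visitor added above (the collector function and include spellings are illustrative): the stream pointer is advanced past the line start and parameter names, the visitor sees each name's dex::StringIndex, and the decoded line number is returned.

#include <cstdint>
#include <vector>

#include "dex/dex_file-inl.h"

uint32_t CollectParameterNames(const art::DexFile& dex_file,
                               const uint8_t* debug_info_stream,
                               std::vector<const char*>* names) {
  return art::DexFile::DecodeDebugInfoParameterNames(
      &debug_info_stream,
      [&](art::dex::StringIndex idx) {
        // Parameters without a name decode to an invalid StringIndex.
        names->push_back(idx.IsValid() ? dex_file.StringDataByIdx(idx) : nullptr);
      });
}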
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index a2198b7..48f38ca 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -60,6 +60,17 @@
UpdateUnsignedLeb128(data_ptr, new_access_flags);
}
+void DexFile::UnhideApis() const {
+ for (ClassAccessor accessor : GetClasses()) {
+ for (const ClassAccessor::Field& field : accessor.GetFields()) {
+ field.UnHideAccessFlags();
+ }
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ method.UnHideAccessFlags();
+ }
+ }
+}
+
uint32_t DexFile::CalculateChecksum() const {
return CalculateChecksum(Begin(), Size());
}
@@ -481,22 +492,6 @@
return -1;
}
-bool DexFile::LineNumForPcCb(void* raw_context, const PositionInfo& entry) {
- LineNumFromPcContext* context = reinterpret_cast<LineNumFromPcContext*>(raw_context);
-
- // We know that this callback will be called in
- // ascending address order, so keep going until we find
- // a match or we've just gone past it.
- if (entry.address_ > context->address_) {
- // The line number from the previous positions callback
- // wil be the final result.
- return true;
- } else {
- context->line_num_ = entry.line_;
- return entry.address_ == context->address_;
- }
-}
-
// Read a signed integer. "zwidth" is the zero-based byte count.
int32_t DexFile::ReadSignedInt(const uint8_t* ptr, int zwidth) {
int32_t val = 0;
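The new UnhideApis() above also doubles as a template for any whole-file member walk; a sketch follows, with the counting function and include spellings as illustrative assumptions.

#include <cstddef>

#include "dex/class_accessor-inl.h"
#include "dex/dex_file.h"

size_t CountFieldsAndMethods(const art::DexFile& dex_file) {
  size_t count = 0;
  for (art::ClassAccessor accessor : dex_file.GetClasses()) {
    for (const art::ClassAccessor::Field& field : accessor.GetFields()) {
      (void)field;
      ++count;
    }
    for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
      (void)method;
      ++count;
    }
  }
  return count;
}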
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index 98787d1..30d8b6d 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -782,8 +782,6 @@
// Callback for "new locals table entry".
typedef void (*DexDebugNewLocalCb)(void* context, const LocalInfo& entry);
- static bool LineNumForPcCb(void* context, const PositionInfo& entry);
-
const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
return DataPointer<AnnotationsDirectoryItem>(class_def.annotations_off_);
}
@@ -865,15 +863,6 @@
DBG_LINE_RANGE = 15,
};
- struct LineNumFromPcContext {
- LineNumFromPcContext(uint32_t address, uint32_t line_num)
- : address_(address), line_num_(line_num) {}
- uint32_t address_;
- uint32_t line_num_;
- private:
- DISALLOW_COPY_AND_ASSIGN(LineNumFromPcContext);
- };
-
// Returns false if there is no debugging information or if it cannot be decoded.
template<typename NewLocalCallback, typename IndexToStringData, typename TypeIndexToStringData>
static bool DecodeDebugLocalInfo(const uint8_t* stream,
@@ -885,10 +874,9 @@
uint16_t registers_size,
uint16_t ins_size,
uint16_t insns_size_in_code_units,
- IndexToStringData index_to_string_data,
- TypeIndexToStringData type_index_to_string_data,
- NewLocalCallback new_local,
- void* context);
+ const IndexToStringData& index_to_string_data,
+ const TypeIndexToStringData& type_index_to_string_data,
+ const NewLocalCallback& new_local) NO_THREAD_SAFETY_ANALYSIS;
template<typename NewLocalCallback>
bool DecodeDebugLocalInfo(uint32_t registers_size,
uint32_t ins_size,
@@ -896,19 +884,13 @@
uint32_t debug_info_offset,
bool is_static,
uint32_t method_idx,
- NewLocalCallback new_local,
- void* context) const;
+ const NewLocalCallback& new_local) const;
// Returns false if there is no debugging information or if it cannot be decoded.
template<typename DexDebugNewPosition, typename IndexToStringData>
static bool DecodeDebugPositionInfo(const uint8_t* stream,
- IndexToStringData index_to_string_data,
- DexDebugNewPosition position_functor,
- void* context);
- template<typename DexDebugNewPosition>
- bool DecodeDebugPositionInfo(uint32_t debug_info_offset,
- DexDebugNewPosition position_functor,
- void* context) const;
+ const IndexToStringData& index_to_string_data,
+ const DexDebugNewPosition& position_functor);
const char* GetSourceFile(const ClassDef& class_def) const {
if (!class_def.source_file_idx_.IsValid()) {
@@ -1013,7 +995,14 @@
// Changes the dex class data pointed to by data_ptr so that it does not have any hiddenapi flags.
static void UnHideAccessFlags(uint8_t* data_ptr, uint32_t new_access_flags, bool is_method);
- inline IterationRange<ClassIterator> GetClasses() const;
+ // Iterates dex classes and removes hiddenapi flags from their fields and methods.
+ void UnhideApis() const;
+
+ IterationRange<ClassIterator> GetClasses() const;
+
+ template <typename Visitor>
+ static uint32_t DecodeDebugInfoParameterNames(const uint8_t** debug_info,
+ const Visitor& visitor);
protected:
// First Dex format version supporting default methods.
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 400c32b..3667c8c 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -25,10 +25,6 @@
#include "standard_dex_file.h"
#include "ziparchive/zip_archive.h"
-// system/core/zip_archive definitions.
-struct ZipEntry;
-typedef void* ZipArchiveHandle;
-
namespace art {
namespace {
@@ -226,16 +222,16 @@
std::string* error_msg) const {
return OpenCommon(base,
size,
- /*data_base*/ nullptr,
- /*data_size*/ 0,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0,
location,
location_checksum,
oat_dex_file,
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
std::unique_ptr<const DexFile> DexFileLoader::OpenWithDataSection(
@@ -259,8 +255,8 @@
verify,
verify_checksum,
error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
+ /*container=*/ nullptr,
+ /*verify_result=*/ nullptr);
}
bool DexFileLoader::OpenAll(
@@ -294,7 +290,7 @@
size,
location,
dex_header->checksum_,
- /*oat_dex_file*/ nullptr,
+ /*oat_dex_file=*/ nullptr,
verify,
verify_checksum,
error_msg));
@@ -414,11 +410,11 @@
std::unique_ptr<const DexFile> dex_file = OpenCommon(
map.data(),
map.size(),
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
+ /*data_base=*/ nullptr,
+ /*data_size=*/ 0u,
location,
zip_entry->GetCrc32(),
- /*oat_dex_file*/ nullptr,
+ /*oat_dex_file=*/ nullptr,
verify,
verify_checksum,
error_msg,
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index 5bb01dd..9c61d1a 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -221,7 +221,7 @@
bool success = dex_file_loader.OpenAll(dex_bytes->data(),
dex_bytes->size(),
location,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
error_code,
error_msg,
@@ -256,9 +256,9 @@
dex_bytes->size(),
location,
location_checksum,
- /* oat_dex_file */ nullptr,
- /* verify */ true,
- /* verify_checksum */ true,
+ /* oat_dex_file= */ nullptr,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_message));
if (expect_success) {
CHECK(dex_file != nullptr) << error_message;
@@ -348,7 +348,7 @@
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -367,7 +367,7 @@
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -386,7 +386,7 @@
ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
dex_bytes.size(),
kLocationString,
- /* verify */ true,
+ /* verify= */ true,
kVerifyChecksum,
&error_code,
&error_msg,
@@ -480,10 +480,6 @@
EXPECT_EQ(raw->StringByTypeIdx(idx), nullptr);
}
-static void Callback(void* context ATTRIBUTE_UNUSED,
- const DexFile::LocalInfo& entry ATTRIBUTE_UNUSED) {
-}
-
TEST_F(DexFileLoaderTest, OpenDexDebugInfoLocalNullType) {
std::vector<uint8_t> dex_bytes;
std::unique_ptr<const DexFile> raw = OpenDexFileInMemoryBase64(kRawDexDebugInfoLocalNullType,
@@ -496,7 +492,7 @@
const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def,
kMethodIdx));
CodeItemDebugInfoAccessor accessor(*raw, code_item, kMethodIdx);
- ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, Callback, nullptr));
+ ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, VoidFunctor()));
}
} // namespace art
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index fd011c8..499a89b 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -341,42 +341,43 @@
bool result =
CheckValidOffsetAndSize(header_->link_off_,
header_->link_size_,
- 0 /* unaligned */,
+ /* alignment= */ 0,
"link") &&
CheckValidOffsetAndSize(header_->map_off_,
header_->map_off_,
- 4,
+ /* alignment= */ 4,
"map") &&
CheckValidOffsetAndSize(header_->string_ids_off_,
header_->string_ids_size_,
- 4,
+ /* alignment= */ 4,
"string-ids") &&
CheckValidOffsetAndSize(header_->type_ids_off_,
header_->type_ids_size_,
- 4,
+ /* alignment= */ 4,
"type-ids") &&
CheckSizeLimit(header_->type_ids_size_, DexFile::kDexNoIndex16, "type-ids") &&
CheckValidOffsetAndSize(header_->proto_ids_off_,
header_->proto_ids_size_,
- 4,
+ /* alignment= */ 4,
"proto-ids") &&
CheckSizeLimit(header_->proto_ids_size_, DexFile::kDexNoIndex16, "proto-ids") &&
CheckValidOffsetAndSize(header_->field_ids_off_,
header_->field_ids_size_,
- 4,
+ /* alignment= */ 4,
"field-ids") &&
CheckValidOffsetAndSize(header_->method_ids_off_,
header_->method_ids_size_,
- 4,
+ /* alignment= */ 4,
"method-ids") &&
CheckValidOffsetAndSize(header_->class_defs_off_,
header_->class_defs_size_,
- 4,
+ /* alignment= */ 4,
"class-defs") &&
CheckValidOffsetAndSize(header_->data_off_,
header_->data_size_,
- 0, // Unaligned, spec doesn't talk about it, even though size
- // is supposed to be a multiple of 4.
+ // Unaligned, spec doesn't talk about it, even though size
+ // is supposed to be a multiple of 4.
+ /* alignment= */ 0,
"data");
return result;
}
@@ -866,7 +867,7 @@
bool DexFileVerifier::CheckEncodedArray() {
DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
- while (size--) {
+ for (; size != 0u; --size) {
if (!CheckEncodedValue()) {
failure_reason_ = StringPrintf("Bad encoded_array value: %s", failure_reason_.c_str());
return false;
@@ -1197,7 +1198,7 @@
ClassAccessor::Method method(*dex_file_, field.ptr_pos_);
if (!CheckIntraClassDataItemMethods(&method,
accessor.NumDirectMethods(),
- nullptr /* direct_it */,
+ /* direct_method= */ nullptr,
0u,
&have_class,
&class_type_index,
@@ -1304,7 +1305,7 @@
}
uint32_t last_addr = 0;
- while (try_items_size--) {
+ for (; try_items_size != 0u; --try_items_size) {
if (UNLIKELY(try_items->start_addr_ < last_addr)) {
ErrorStringPrintf("Out-of_order try_item with start_addr: %x", try_items->start_addr_);
return false;
@@ -1884,7 +1885,7 @@
ptr_ = begin_;
// Check the items listed in the map.
- while (count--) {
+ for (; count != 0u; --count) {
const size_t current_offset = offset;
uint32_t section_offset = item->offset_;
uint32_t section_count = item->size_;
@@ -2554,7 +2555,7 @@
const DexFile::AnnotationSetRefItem* item = list->list_;
uint32_t count = list->size_;
- while (count--) {
+ for (; count != 0u; --count) {
if (item->annotations_off_ != 0 &&
!CheckOffsetToTypeMap(item->annotations_off_, DexFile::kDexTypeAnnotationSetItem)) {
return false;
@@ -2839,7 +2840,7 @@
uint32_t count = map->size_;
// Cross check the items listed in the map.
- while (count--) {
+ for (; count != 0u; --count) {
uint32_t section_offset = item->offset_;
uint32_t section_count = item->size_;
DexFile::MapItemType type = static_cast<DexFile::MapItemType>(item->type_);
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index a22a457..c3180f0 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -107,8 +107,8 @@
bool success = dex_file_loader.OpenAll(dex_bytes.get(),
length,
location,
- /* verify */ true,
- /* verify_checksum */ true,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_code,
error_msg,
&tmp);
@@ -1621,13 +1621,13 @@
dex_file->Begin(),
dex_file->Size(),
"good checksum, no verify",
- /*verify_checksum*/ false,
+ /*verify_checksum=*/ false,
&error_msg));
EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
"good checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
// Bad checksum: !verify_checksum passes verify_checksum fails.
@@ -1638,13 +1638,13 @@
dex_file->Begin(),
dex_file->Size(),
"bad checksum, no verify",
- /*verify_checksum*/ false,
+ /*verify_checksum=*/ false,
&error_msg));
EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
"bad checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
EXPECT_NE(error_msg.find("Bad checksum"), std::string::npos) << error_msg;
}
@@ -1691,7 +1691,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad static method name",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1735,7 +1735,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad virtual method name",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1779,7 +1779,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad clinit signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1823,7 +1823,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad clinit signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -1860,7 +1860,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad init signature",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -2063,7 +2063,7 @@
dex_file->Begin(),
dex_file->Size(),
"good checksum, verify",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
// TODO(oth): Test corruptions (b/35308502)
}
@@ -2110,7 +2110,7 @@
dex_file->Begin(),
dex_file->Size(),
"bad static field initial values array",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
@@ -2166,7 +2166,7 @@
dex_file->Begin(),
dex_file->Size(),
"good static field initial values array",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error_msg));
}
diff --git a/libdexfile/dex/dex_instruction_test.cc b/libdexfile/dex/dex_instruction_test.cc
index 6ce9dba..02400f4 100644
--- a/libdexfile/dex/dex_instruction_test.cc
+++ b/libdexfile/dex/dex_instruction_test.cc
@@ -71,10 +71,13 @@
TEST(Instruction, PropertiesOf45cc) {
uint16_t instruction[4];
- Build45cc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
- 0xcafe /* arg_regs */, instruction);
+ Build45cc(/* num_args= */ 4u,
+ /* method_idx= */ 16u,
+ /* proto_idx= */ 32u,
+ /* arg_regs= */ 0xcafe,
+ instruction);
- DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+ DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
@@ -106,10 +109,13 @@
TEST(Instruction, PropertiesOf4rcc) {
uint16_t instruction[4];
- Build4rcc(4u /* num_vregs */, 16u /* method_idx */, 32u /* proto_idx */,
- 0xcafe /* arg_regs */, instruction);
+ Build4rcc(/* num_args= */ 4u,
+ /* method_idx= */ 16u,
+ /* proto_idx= */ 32u,
+ /* arg_regs_start= */ 0xcafe,
+ instruction);
- DexInstructionIterator ins(instruction, /*dex_pc*/ 0u);
+ DexInstructionIterator ins(instruction, /*dex_pc=*/ 0u);
ASSERT_EQ(4u, ins->SizeInCodeUnits());
ASSERT_TRUE(ins->HasVRegA());
diff --git a/libdexfile/dex/hidden_api_access_flags.h b/libdexfile/dex/hidden_api_access_flags.h
index 1aaeabd..369615d 100644
--- a/libdexfile/dex/hidden_api_access_flags.h
+++ b/libdexfile/dex/hidden_api_access_flags.h
@@ -62,6 +62,7 @@
kLightGreylist,
kDarkGreylist,
kBlacklist,
+ kNoList,
};
static ALWAYS_INLINE ApiList DecodeFromDex(uint32_t dex_access_flags) {
@@ -159,6 +160,9 @@
case HiddenApiAccessFlags::kBlacklist:
os << "blacklist";
break;
+ case HiddenApiAccessFlags::kNoList:
+ os << "no list";
+ break;
}
return os;
}
diff --git a/libdexfile/dex/type_lookup_table.cc b/libdexfile/dex/type_lookup_table.cc
index 00ec358..7d80a2e 100644
--- a/libdexfile/dex/type_lookup_table.cc
+++ b/libdexfile/dex/type_lookup_table.cc
@@ -94,7 +94,7 @@
DCHECK_ALIGNED(raw_data, alignof(Entry));
const Entry* entries = reinterpret_cast<const Entry*>(raw_data);
size_t mask_bits = CalculateMaskBits(num_class_defs);
- return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries */ nullptr);
+ return TypeLookupTable(dex_data_pointer, mask_bits, entries, /* owned_entries= */ nullptr);
}
uint32_t TypeLookupTable::Lookup(const char* str, uint32_t hash) const {
diff --git a/libdexfile/dex/type_reference.h b/libdexfile/dex/type_reference.h
index 9e7b880..3207e32 100644
--- a/libdexfile/dex/type_reference.h
+++ b/libdexfile/dex/type_reference.h
@@ -31,8 +31,8 @@
// A type is located by its DexFile and the string_ids_ table index into that DexFile.
class TypeReference : public DexFileReference {
public:
- TypeReference(const DexFile* file, dex::TypeIndex index)
- : DexFileReference(file, index.index_) {}
+ TypeReference(const DexFile* dex_file, dex::TypeIndex index)
+ : DexFileReference(dex_file, index.index_) {}
dex::TypeIndex TypeIndex() const {
return dex::TypeIndex(index);
diff --git a/libprofile/Android.bp b/libprofile/Android.bp
index b9883f6..edd9fa8 100644
--- a/libprofile/Android.bp
+++ b/libprofile/Android.bp
@@ -56,6 +56,37 @@
export_shared_lib_headers: ["libbase"],
}
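+// Defaults for statically linking libprofile and its dependencies; the release and
+// debug variants below are consumed by the static oatdump binaries in this change.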
+cc_defaults {
+ name: "libprofile_static_base_defaults",
+ static_libs: [
+ "libbase",
+ "libcutils",
+ "libutils",
+ "libz",
+ "libziparchive",
+ ],
+}
+
+cc_defaults {
+ name: "libprofile_static_defaults",
+ defaults: [
+ "libprofile_static_base_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ ],
+ static_libs: ["libprofile"],
+}
+
+cc_defaults {
+ name: "libprofiled_static_defaults",
+ defaults: [
+ "libprofile_static_base_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ ],
+ static_libs: ["libprofiled"],
+}
+
art_cc_library {
name: "libprofile",
defaults: ["libprofile_defaults"],
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index c765345..6bd49a4 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -23,17 +23,21 @@
#include <unistd.h>
#include <zlib.h>
+#include <algorithm>
#include <cerrno>
#include <climits>
#include <cstdlib>
+#include <iostream>
+#include <numeric>
+#include <random>
#include <string>
#include <vector>
-#include <iostream>
#include "android-base/file.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/logging.h" // For VLOG.
#include "base/malloc_arena_pool.h"
#include "base/os.h"
@@ -186,8 +190,8 @@
bool ProfileCompilationInfo::MergeWith(const std::string& filename) {
std::string error;
int flags = O_RDONLY | O_NOFOLLOW | O_CLOEXEC;
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -217,8 +221,8 @@
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let the kernel decide when to write to disk.
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
@@ -255,8 +259,8 @@
// There's no need to fsync profile data right away. We get many chances
// to write it again in case something goes wrong. We can rely on a simple
// close(), no sync, and let the kernel decide when to write to disk.
- ScopedFlock profile_file = LockedFile::Open(filename.c_str(), flags,
- /*block*/false, &error);
+ ScopedFlock profile_file =
+ LockedFile::Open(filename.c_str(), flags, /*block=*/false, &error);
if (profile_file.get() == nullptr) {
LOG(WARNING) << "Couldn't lock the profile file " << filename << ": " << error;
return false;
@@ -1171,7 +1175,8 @@
source->reset(ProfileSource::Create(fd));
return kProfileLoadSuccess;
} else {
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, "profile", error));
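+ // DupCloexec() passes a duplicate of `fd` so the ZipArchive can own (and close) its
+ // own descriptor without closing the caller's.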
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(DupCloexec(fd), "profile", error));
if (zip_archive.get() == nullptr) {
*error = "Could not open the profile zip archive";
return kProfileLoadBadData;
@@ -1388,8 +1393,8 @@
// verify_checksum is false because we want to differentiate between a missing dex data and
// a mismatched checksum.
const DexFileData* dex_data = FindDexData(other_profile_line_header.dex_location,
- 0u,
- false /* verify_checksum */);
+ /* checksum= */ 0u,
+ /* verify_checksum= */ false);
if ((dex_data != nullptr) && (dex_data->checksum != other_profile_line_header.checksum)) {
LOG(WARNING) << "Checksum mismatch for dex " << other_profile_line_header.dex_location;
return false;
@@ -1476,8 +1481,8 @@
// verify_checksum is false because we want to differentiate between a missing dex data and
// a mismatched checksum.
const DexFileData* dex_data = FindDexData(other_dex_data->profile_key,
- 0u,
- /* verify_checksum */ false);
+ /* checksum= */ 0u,
+ /* verify_checksum= */ false);
if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
return false;
@@ -1631,25 +1636,7 @@
return total;
}
-// Produce a non-owning vector from a vector.
-template<typename T>
-const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
- auto non_owning_vector = new std::vector<T*>();
- for (auto& element : *owning_vector) {
- non_owning_vector->push_back(element.get());
- }
- return non_owning_vector;
-}
-
-std::string ProfileCompilationInfo::DumpInfo(
- const std::vector<std::unique_ptr<const DexFile>>* dex_files,
- bool print_full_dex_location) const {
- std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
- MakeNonOwningVector(dex_files));
- return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
-}
-
-std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
+std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>& dex_files,
bool print_full_dex_location) const {
std::ostringstream os;
if (info_.empty()) {
@@ -1672,11 +1659,10 @@
os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
os << " [checksum=" << std::hex << dex_data->checksum << "]" << std::dec;
const DexFile* dex_file = nullptr;
- if (dex_files != nullptr) {
- for (size_t i = 0; i < dex_files->size(); i++) {
- if (dex_data->profile_key == (*dex_files)[i]->GetLocation()) {
- dex_file = (*dex_files)[i];
- }
+ for (const DexFile* current : dex_files) {
+ if (dex_data->profile_key == current->GetLocation() &&
+ dex_data->checksum == current->GetLocationChecksum()) {
+ dex_file = current;
}
}
os << "\n\thot methods: ";
@@ -1843,7 +1829,7 @@
flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
profile_key,
- /*method_idx*/ 0,
+ /*checksum=*/ 0,
method_idx,
max_method);
}
@@ -1870,43 +1856,42 @@
uint16_t method_percentage,
uint16_t class_percentage,
uint32_t random_seed) {
- std::srand(random_seed);
ProfileCompilationInfo info;
+ std::default_random_engine rng(random_seed);
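+ // Returns `take` distinct indices drawn uniformly from [0, out_of), in ascending order.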
+ auto create_shuffled_range = [&rng](uint32_t take, uint32_t out_of) {
+ CHECK_LE(take, out_of);
+ std::vector<uint32_t> vec(out_of);
+ std::iota(vec.begin(), vec.end(), 0u);
+ std::shuffle(vec.begin(), vec.end(), rng);
+ vec.erase(vec.begin() + take, vec.end());
+ std::sort(vec.begin(), vec.end());
+ return vec;
+ };
for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
const std::string& location = dex_file->GetLocation();
uint32_t checksum = dex_file->GetLocationChecksum();
uint32_t number_of_classes = dex_file->NumClassDefs();
uint32_t classes_required_in_profile = (number_of_classes * class_percentage) / 100;
- uint32_t class_start_index = rand() % number_of_classes;
- for (uint32_t i = 0; i < number_of_classes && classes_required_in_profile; ++i) {
- if (number_of_classes - i == classes_required_in_profile ||
- std::rand() % (number_of_classes - i - classes_required_in_profile) == 0) {
- uint32_t class_index = (i + class_start_index) % number_of_classes;
- info.AddClassIndex(location,
- checksum,
- dex_file->GetClassDef(class_index).class_idx_,
- dex_file->NumMethodIds());
- classes_required_in_profile--;
- }
+ for (uint32_t class_index : create_shuffled_range(classes_required_in_profile,
+ number_of_classes)) {
+ info.AddClassIndex(location,
+ checksum,
+ dex_file->GetClassDef(class_index).class_idx_,
+ dex_file->NumMethodIds());
}
uint32_t number_of_methods = dex_file->NumMethodIds();
uint32_t methods_required_in_profile = (number_of_methods * method_percentage) / 100;
- uint32_t method_start_index = rand() % number_of_methods;
- for (uint32_t i = 0; i < number_of_methods && methods_required_in_profile; ++i) {
- if (number_of_methods - i == methods_required_in_profile ||
- std::rand() % (number_of_methods - i - methods_required_in_profile) == 0) {
- uint32_t method_index = (method_start_index + i) % number_of_methods;
- // Alternate between startup and post startup.
- uint32_t flags = MethodHotness::kFlagHot;
- flags |= ((method_index & 1) != 0)
- ? MethodHotness::kFlagPostStartup
- : MethodHotness::kFlagStartup;
- info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
- MethodReference(dex_file.get(), method_index));
- methods_required_in_profile--;
- }
+ for (uint32_t method_index : create_shuffled_range(methods_required_in_profile,
+ number_of_methods)) {
+ // Alternate between startup and post startup.
+ uint32_t flags = MethodHotness::kFlagHot;
+ flags |= ((method_index & 1) != 0)
+ ? MethodHotness::kFlagPostStartup
+ : MethodHotness::kFlagStartup;
+ info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
+ MethodReference(dex_file.get(), method_index));
}
}
return info.Save(fd);
@@ -1990,20 +1975,20 @@
MethodHotness::Flag flags) {
DCHECK_LT(index, num_method_ids);
if ((flags & MethodHotness::kFlagStartup) != 0) {
- method_bitmap.StoreBit(MethodBitIndex(/*startup*/ true, index), /*value*/ true);
+ method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ true, index), /*value=*/ true);
}
if ((flags & MethodHotness::kFlagPostStartup) != 0) {
- method_bitmap.StoreBit(MethodBitIndex(/*startup*/ false, index), /*value*/ true);
+ method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ false, index), /*value=*/ true);
}
}
ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHotnessInfo(
uint32_t dex_method_index) const {
MethodHotness ret;
- if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ true, dex_method_index))) {
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ true, dex_method_index))) {
ret.AddFlag(MethodHotness::kFlagStartup);
}
- if (method_bitmap.LoadBit(MethodBitIndex(/*startup*/ false, dex_method_index))) {
+ if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ false, dex_method_index))) {
ret.AddFlag(MethodHotness::kFlagPostStartup);
}
auto it = method_map.find(dex_method_index);
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 0dbf490..92fa098 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -377,12 +377,10 @@
uint16_t dex_method_index) const;
// Dump all the loaded profile info into a string and returns it.
- // If dex_files is not null then the method indices will be resolved to their
+ // If dex_files is not empty then the method indices will be resolved to their
// names.
// This is intended for testing and debugging.
- std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
- bool print_full_dex_location = true) const;
- std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
+ std::string DumpInfo(const std::vector<const DexFile*>& dex_files,
bool print_full_dex_location = true) const;
// Return the classes and methods for a given dex file through out args. The out args are the set
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 417abaa..a2bfe50 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -43,22 +43,22 @@
protected:
bool AddMethod(const std::string& dex_location,
uint32_t checksum,
- uint16_t method_index,
+ uint16_t method_idx,
ProfileCompilationInfo* info) {
return info->AddMethodIndex(Hotness::kFlagHot,
dex_location,
checksum,
- method_index,
+ method_idx,
kMaxMethodIds);
}
bool AddMethod(const std::string& dex_location,
uint32_t checksum,
- uint16_t method_index,
+ uint16_t method_idx,
const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
ProfileCompilationInfo* info) {
return info->AddMethod(
- dex_location, checksum, method_index, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
+ dex_location, checksum, method_idx, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
}
bool AddClass(const std::string& dex_location,
@@ -115,9 +115,9 @@
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */2, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum= */2, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
return pmi;
}
@@ -148,8 +148,8 @@
ScratchFile profile;
ProfileCompilationInfo saved_info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -207,8 +207,8 @@
ProfileCompilationInfo saved_info;
// Save a few methods.
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -221,9 +221,9 @@
// Save more methods.
for (uint16_t i = 0; i < 100; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location3", /* checksum */ 3, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location3", /* checksum= */ 3, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -240,19 +240,19 @@
ScratchFile profile;
ProfileCompilationInfo info;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info));
// Trying to add info for an existing file but with a different checksum.
- ASSERT_FALSE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info));
+ ASSERT_FALSE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info));
}
TEST_F(ProfileCompilationInfoTest, MergeFail) {
ScratchFile profile;
ProfileCompilationInfo info1;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
// Use the same file, change the checksum.
ProfileCompilationInfo info2;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
ASSERT_FALSE(info1.MergeWith(info2));
}
@@ -262,10 +262,10 @@
ScratchFile profile;
ProfileCompilationInfo info1;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 1, /* method_idx */ 1, &info1));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
// Use the same file, change the checksum.
ProfileCompilationInfo info2;
- ASSERT_TRUE(AddMethod("dex_location", /* checksum */ 2, /* method_idx */ 2, &info2));
+ ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
ASSERT_TRUE(info1.Save(profile.GetFd()));
ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -280,13 +280,13 @@
ProfileCompilationInfo saved_info;
// Save the maximum number of methods
for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
}
// Save the maximum number of classes
for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
- ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -390,7 +390,7 @@
ProfileCompilationInfo saved_info;
// Save the maximum number of methods
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -415,9 +415,9 @@
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the
// class from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -431,11 +431,11 @@
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- loaded_info.GetMethod("dex_location4", /* checksum */ 4, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -448,7 +448,7 @@
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -459,7 +459,7 @@
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
MakeMegamorphic(&pmi_extra);
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -477,7 +477,7 @@
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
@@ -491,7 +491,7 @@
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -502,7 +502,7 @@
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
MakeMegamorphic(&pmi_extra);
for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
// Mark all inline caches with missing types and add them to the profile again.
@@ -510,7 +510,7 @@
ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
SetIsMissingTypes(&missing_types);
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
}
ASSERT_TRUE(profile.GetFile()->ResetOffset());
@@ -528,7 +528,7 @@
ASSERT_TRUE(loaded_info.Equals(saved_info));
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ 3);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
}
@@ -542,8 +542,8 @@
// Modify the checksum to trigger a mismatch.
pmi2.dex_references[0].dex_checksum++;
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /*method_idx*/ 0, pmi1, &info));
- ASSERT_FALSE(AddMethod("dex_location2", /* checksum */ 2, /*method_idx*/ 0, pmi2, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /*method_idx=*/ 0, pmi1, &info));
+ ASSERT_FALSE(AddMethod("dex_location2", /* checksum= */ 2, /*method_idx=*/ 0, pmi2, &info));
}
// Verify that profiles behave correctly even if the methods are added in a different
@@ -556,8 +556,8 @@
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(0, dex::TypeIndex(0));
@@ -567,8 +567,8 @@
ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
- pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum */ 2, kMaxMethodIds);
- pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
+ pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.AddClass(1, dex::TypeIndex(0));
@@ -579,15 +579,15 @@
// Profile 1 and Profile 2 get the same methods but in different order.
// This will trigger a different dex numbers.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &info));
}
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
ASSERT_TRUE(AddMethod(
- "dex_location2", /* checksum */ 2, method_idx, pmi_reindexed, &info_reindexed));
+ "dex_location2", /* checksum= */ 2, method_idx, pmi_reindexed, &info_reindexed));
ASSERT_TRUE(AddMethod(
- "dex_location1", /* checksum */ 1, method_idx, pmi_reindexed, &info_reindexed));
+ "dex_location1", /* checksum= */ 1, method_idx, pmi_reindexed, &info_reindexed));
}
ProfileCompilationInfo info_backup;
@@ -597,11 +597,11 @@
ASSERT_TRUE(info.Equals(info_backup));
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- info.GetMethod("dex_location2", /* checksum */ 2, method_idx);
+ info.GetMethod("dex_location2", /* dex_checksum= */ 2, method_idx);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -612,34 +612,34 @@
// Save a few methods.
for (uint16_t i = 0; i < std::numeric_limits<uint8_t>::max(); i++) {
std::string dex_location = std::to_string(i);
- ASSERT_TRUE(AddMethod(dex_location, /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod(dex_location, /* checksum= */ 1, /* method_idx= */ i, &info));
}
// We only support at most 255 dex files.
ASSERT_FALSE(AddMethod(
- /*dex_location*/ "256", /* checksum */ 1, /* method_idx */ 0, &info));
+ /*dex_location=*/ "256", /* checksum= */ 1, /* method_idx= */ 0, &info));
}
TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
// Create a megamorphic inline cache.
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMegamorphic();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
pmi,
&info_megamorphic));
// Create a profile with no inline caches (for the same method).
ProfileCompilationInfo info_no_inline_cache;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
&info_no_inline_cache));
// Merge the megamorphic cache into the empty one.
@@ -653,23 +653,23 @@
// Create an inline cache with missing types
ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1, kMaxMethodIds);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
dex_pc_data.SetIsMissingTypes();
ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
pmi,
&info_megamorphic));
// Create a profile with no inline caches (for the same method).
ProfileCompilationInfo info_no_inline_cache;
ASSERT_TRUE(AddMethod("dex_location1",
- /*checksum*/ 1,
- /*method_idx*/ 0,
+ /*checksum=*/ 1,
+ /*method_idx=*/ 0,
&info_no_inline_cache));
// Merge the missing type cache into the empty one.
@@ -766,26 +766,26 @@
TEST_F(ProfileCompilationInfoTest, LoadFromZipCompress) {
TestProfileLoadFromZip("primary.prof",
ZipWriter::kCompress | ZipWriter::kAlign32,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipUnCompress) {
TestProfileLoadFromZip("primary.prof",
ZipWriter::kAlign32,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipUnAligned) {
TestProfileLoadFromZip("primary.prof",
0,
- /*should_succeed*/true);
+ /*should_succeed=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadZipEntry) {
TestProfileLoadFromZip("invalid.profile.entry",
0,
- /*should_succeed*/true,
- /*should_succeed_with_empty_profile*/true);
+ /*should_succeed=*/true,
+ /*should_succeed_with_empty_profile=*/true);
}
TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadProfile) {
@@ -835,7 +835,7 @@
info.AddMethodIndex(Hotness::kFlagHot,
old_name,
dex->GetLocationChecksum(),
- /* method_idx */ 0,
+ /* method_idx= */ 0,
dex->NumMethodIds());
}
@@ -845,7 +845,7 @@
// Verify that we find the methods when searched with the original dex files.
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi != nullptr);
}
}
@@ -856,9 +856,9 @@
ProfileCompilationInfo info;
info.AddMethodIndex(Hotness::kFlagHot,
"my.app",
- /* checksum */ 123,
- /* method_idx */ 0,
- /* num_method_ids */ 10);
+ /* checksum= */ 123,
+ /* method_idx= */ 0,
+ /* num_method_ids= */ 10);
// Update the profile keys based on the original dex files
ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
@@ -867,13 +867,13 @@
// location.
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi == nullptr);
}
// Verify that we can find the original entry.
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
- info.GetMethod("my.app", /* checksum */ 123, /* method_idx */ 0);
+ info.GetMethod("my.app", /* dex_checksum= */ 123, /* dex_method_index= */ 0);
ASSERT_TRUE(loaded_pmi != nullptr);
}
@@ -892,7 +892,7 @@
info.AddMethodIndex(Hotness::kFlagHot,
old_name,
dex->GetLocationChecksum(),
- /* method_idx */ 0,
+ /* method_idx= */ 0,
dex->NumMethodIds());
}
@@ -900,8 +900,8 @@
// This will cause the rename to fail because an existing entry would already have that name.
info.AddMethodIndex(Hotness::kFlagHot,
dex_files[0]->GetLocation(),
- /* checksum */ 123,
- /* method_idx */ 0,
+ /* checksum= */ 123,
+ /* method_idx= */ 0,
dex_files[0]->NumMethodIds());
ASSERT_FALSE(info.UpdateProfileKeys(dex_files));
@@ -916,10 +916,10 @@
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the class from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -941,8 +941,12 @@
// Dex location 2 and 4 should have been filtered out
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
- ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2", /* checksum */ 2, method_idx));
- ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx));
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2",
+ /* dex_checksum= */ 2,
+ method_idx));
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4",
+ /* dex_checksum= */ 4,
+ method_idx));
}
// Dex location 1 should have all the inline caches referencing dex location 2 set to
@@ -950,7 +954,7 @@
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// The methods for dex location 1 should be in the profile data.
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ method_idx);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
// Verify the inline cache.
@@ -989,8 +993,8 @@
ProfileCompilationInfo::OfflineProfileMethodInfo expected_pmi(ic_map);
// The dex references should not have dex_location2 in the list.
- expected_pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
- expected_pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+ expected_pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
+ expected_pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
// Now check that we get back what we expect.
ASSERT_TRUE(*loaded_pmi1 == expected_pmi);
@@ -1006,10 +1010,10 @@
// Add methods with inline caches.
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the class from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
- ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1038,9 +1042,9 @@
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
// Add a method which is part of the same dex file as one of the
// class from the inline caches.
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
// Add a method which is outside the set of dex files.
- ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1060,13 +1064,13 @@
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
- loaded_info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
ASSERT_TRUE(loaded_pmi1 != nullptr);
ASSERT_TRUE(*loaded_pmi1 == pmi);
}
for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
- loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx);
+ loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, method_idx);
ASSERT_TRUE(loaded_pmi2 != nullptr);
ASSERT_TRUE(*loaded_pmi2 == pmi);
}
@@ -1081,8 +1085,8 @@
ProfileCompilationInfo saved_info;
uint16_t item_count = 1000;
for (uint16_t i = 0; i < item_count; i++) {
- ASSERT_TRUE(AddClass("dex_location1", /* checksum */ 1, dex::TypeIndex(i), &saved_info));
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
}
ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1101,7 +1105,7 @@
// Compute the expectation.
ProfileCompilationInfo expected_info;
for (uint16_t i = 0; i < item_count; i++) {
- ASSERT_TRUE(AddClass("dex_location2", /* checksum */ 2, dex::TypeIndex(i), &expected_info));
+ ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &expected_info));
}
// Validate the expectation.
@@ -1112,7 +1116,7 @@
TEST_F(ProfileCompilationInfoTest, ClearData) {
ProfileCompilationInfo info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
}
ASSERT_FALSE(IsEmpty(info));
info.ClearData();
@@ -1122,7 +1126,7 @@
TEST_F(ProfileCompilationInfoTest, ClearDataAndSave) {
ProfileCompilationInfo info;
for (uint16_t i = 0; i < 10; i++) {
- ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &info));
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
}
info.ClearData();
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 3cd8ae0..596a946 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -37,6 +37,7 @@
shared_libs: [
"libart",
"libart-compiler",
+ "libart-dexlayout",
"libart-disassembler",
"libdexfile",
"libartbase",
@@ -54,6 +55,7 @@
shared_libs: [
"libartd",
"libartd-compiler",
+ "libartd-dexlayout",
"libartd-disassembler",
"libdexfiled",
"libartbased",
@@ -66,7 +68,9 @@
name: "oatdumps-defaults",
device_supported: false,
static_executable: true,
- defaults: ["oatdump-defaults"],
+ defaults: [
+ "oatdump-defaults",
+ ],
target: {
darwin: {
enabled: false,
@@ -80,18 +84,21 @@
// Try to get rid of it.
"-z muldefs",
],
- static_libs: art_static_dependencies,
+ static_libs: ["libsigchain_dummy"],
}
art_cc_binary {
name: "oatdumps",
- defaults: ["oatdumps-defaults"],
+ defaults: [
+ "libart_static_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ "libart-compiler_static_defaults",
+ "libart-dexlayout_static_defaults",
+ "oatdumps-defaults",
+ ],
static_libs: [
- "libart",
- "libdexfile",
- "libprofile",
- "libartbase",
- "libart-compiler",
"libart-disassembler",
"libvixl-arm",
"libvixl-arm64",
@@ -102,6 +109,12 @@
name: "oatdumpds",
defaults: [
"art_debug_defaults",
+ "libartd_static_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ "libartd-compiler_static_defaults",
+ "libartd-dexlayout_static_defaults",
"oatdumps-defaults",
],
target: {
@@ -110,15 +123,11 @@
},
},
static_libs: [
- "libartd",
- "libdexfiled",
- "libprofiled",
- "libartbased",
- "libartd-compiler",
"libartd-disassembler",
"libvixld-arm",
"libvixld-arm64",
],
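+ // Link the static libs as a group so cyclic references between them resolve.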
+ group_static_libs: true,
}
art_cc_test {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 91283d6..793245b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -28,6 +28,7 @@
#include <vector>
#include "android-base/logging.h"
+#include "android-base/parseint.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
@@ -48,6 +49,7 @@
#include "debug/debug_info.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/descriptors_names.h"
@@ -55,6 +57,7 @@
#include "dex/dex_instruction-inl.h"
#include "dex/string_reference.h"
#include "dex/type_lookup_table.h"
+#include "dexlayout.h"
#include "disassembler.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/space/image_space.h"
@@ -221,7 +224,7 @@
debug::WriteDebugInfo(builder_.get(),
debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
- true /* write_oat_patches */);
+ /* write_oat_patches= */ true);
builder_->End();
@@ -398,7 +401,7 @@
options_.absolute_addresses_,
oat_file.Begin(),
oat_file.End(),
- true /* can_read_literals_ */,
+ /* can_read_literals_= */ true,
Is64BitInstructionSet(instruction_set_)
? &Thread::DumpThreadOffset<PointerSize::k64>
: &Thread::DumpThreadOffset<PointerSize::k32>))) {
@@ -416,7 +419,7 @@
return instruction_set_;
}
- typedef std::vector<std::unique_ptr<const DexFile>> DexFileUniqV;
+ using DexFileUniqV = std::vector<std::unique_ptr<const DexFile>>;
bool Dump(std::ostream& os) {
bool success = true;
@@ -470,17 +473,9 @@
GetQuickToInterpreterBridgeOffset);
#undef DUMP_OAT_HEADER_OFFSET
- os << "IMAGE PATCH DELTA:\n";
- os << StringPrintf("%d (0x%08x)\n\n",
- oat_header.GetImagePatchDelta(),
- oat_header.GetImagePatchDelta());
-
os << "IMAGE FILE LOCATION OAT CHECKSUM:\n";
os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatChecksum());
- os << "IMAGE FILE LOCATION OAT BEGIN:\n";
- os << StringPrintf("0x%08x\n\n", oat_header.GetImageFileLocationOatDataBegin());
-
// Print the key-value store.
{
os << "KEY VALUE STORE:\n";
@@ -632,8 +627,64 @@
const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
CHECK(vdex_dex_file != nullptr);
- if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get())) {
- success = false;
+
+ // Strip the hidden API access flags from the dex file before exporting.
+ vdex_dex_file->UnhideApis();
+
+ // If a CompactDex file is detected within a Vdex container, DexLayout is used to convert
+ // back to a StandardDex file. Since the converted DexFile will most likely not reproduce
+ // the original input Dex file, the `update_checksum_` option is used to recompute the
+ // checksum. If the vdex container does not contain cdex resources (`used_dexlayout` is
+ // false), ExportDexFile() enforces a reproducible checksum verification.
+ if (vdex_dex_file->IsCompactDexFile()) {
+ Options options;
+ options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+ options.update_checksum_ = true;
+ DexLayout dex_layout(options, /*info=*/ nullptr, /*out_file=*/ nullptr, /*header=*/ nullptr);
+ std::unique_ptr<art::DexContainer> dex_container;
+ bool result = dex_layout.ProcessDexFile(vdex_dex_file->GetLocation().c_str(),
+ vdex_dex_file.get(),
+ i,
+ &dex_container,
+ &error_msg);
+ if (!result) {
+ os << "DexLayout failed to process Dex file: " + error_msg;
+ success = false;
+ break;
+ }
+ DexContainer::Section* main_section = dex_container->GetMainSection();
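+ // With kCompactDexLevelNone the output is a single StandardDex blob, so the data
+ // section is expected to be empty.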
+ CHECK_EQ(dex_container->GetDataSection()->Size(), 0u);
+
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open(
+ main_section->Begin(),
+ main_section->Size(),
+ vdex_dex_file->GetLocation(),
+ vdex_file->GetLocationChecksum(i),
+ /*oat_dex_file=*/ nullptr,
+ /*verify=*/ false,
+ /*verify_checksum=*/ true,
+ &error_msg));
+ if (dex == nullptr) {
+ os << "Failed to load DexFile from layout container: " + error_msg;
+ success = false;
+ break;
+ }
+ if (dex->IsCompactDexFile()) {
+ os <<"CompactDex conversion to StandardDex failed";
+ success = false;
+ break;
+ }
+
+ if (!ExportDexFile(os, *oat_dex_file, dex.get(), /*used_dexlayout=*/ true)) {
+ success = false;
+ break;
+ }
+ } else {
+ if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get(), /*used_dexlayout=*/ false)) {
+ success = false;
+ break;
+ }
}
i++;
}
@@ -713,8 +764,8 @@
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
file->Fd(),
- /* start offset */ 0,
- /* low_4gb */ false,
+ /* start offset= */ 0,
+ /* low_4gb= */ false,
vdex_filename.c_str(),
error_msg);
if (!mmap.IsValid()) {
@@ -735,7 +786,7 @@
}
vdex_file->Unquicken(MakeNonOwningPointerVector(tmp_dex_files),
- /* decompile_return_instruction */ true);
+ /* decompile_return_instruction= */ true);
*dex_files = std::move(tmp_dex_files);
return vdex_file;
@@ -905,10 +956,16 @@
// Dex resource is extracted from the oat_dex_file and its checksum is repaired since it's not
// unquickened. Otherwise the dex_file has been fully unquickened and is expected to verify the
// original checksum.
- bool ExportDexFile(std::ostream& os, const OatDexFile& oat_dex_file, const DexFile* dex_file) {
+ bool ExportDexFile(std::ostream& os,
+ const OatDexFile& oat_dex_file,
+ const DexFile* dex_file,
+ bool used_dexlayout) {
std::string error_msg;
std::string dex_file_location = oat_dex_file.GetDexFileLocation();
- size_t fsize = oat_dex_file.FileSize();
+
+ // If dex_file (from unquicken or dexlayout) is not available, the output DexFile size is the
+ // same as the one extracted from the Oat container (pre-Oreo).
+ size_t fsize = dex_file == nullptr ? oat_dex_file.FileSize() : dex_file->Size();
// Some quick checks just in case
if (fsize == 0 || fsize < sizeof(DexFile::Header)) {
@@ -928,27 +985,19 @@
reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
dex_file->CalculateChecksum();
} else {
- // Vdex unquicken output should match original input bytecode
- uint32_t orig_checksum =
- reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
- CHECK_EQ(orig_checksum, dex_file->CalculateChecksum());
- if (orig_checksum != dex_file->CalculateChecksum()) {
- os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
- return false;
+ // If dexlayout was used to convert CompactDex back to StandardDex, checksum will be updated
+ // due to `update_checksum_` option, otherwise we expect a reproducible checksum.
+ if (!used_dexlayout) {
+ // Vdex unquicken output should match original input bytecode
+ uint32_t orig_checksum =
+ reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
+ if (orig_checksum != dex_file->CalculateChecksum()) {
+ os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
+ return false;
+ }
}
}
- // Update header for shared section.
- uint32_t shared_section_offset = 0u;
- uint32_t shared_section_size = 0u;
- if (dex_file->IsCompactDexFile()) {
- CompactDexFile::Header* const header =
- reinterpret_cast<CompactDexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()));
- shared_section_offset = header->data_off_;
- shared_section_size = header->data_size_;
- // The shared section will be serialized right after the dex file.
- header->data_off_ = header->file_size_;
- }
// Verify output directory exists
if (!OS::DirectoryExists(options_.export_dex_location_)) {
// TODO: Extend OS::DirectoryExists if symlink support is required
@@ -1002,15 +1051,6 @@
return false;
}
- if (shared_section_size != 0) {
- success = file->WriteFully(dex_file->Begin() + shared_section_offset, shared_section_size);
- if (!success) {
- os << "Failed to write shared data section";
- file->Erase();
- return false;
- }
- }
-
if (file->FlushCloseOrErase() != 0) {
os << "Flush and close failed";
return false;
@@ -1474,7 +1514,7 @@
}
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
- class_def, code_item, method, method_access_flags);
+ class_def, code_item, method, method_access_flags, /* api_level= */ 0);
}
return nullptr;
@@ -1755,8 +1795,6 @@
os << "PATCH DELTA:" << image_header_.GetPatchDelta() << "\n\n";
- os << "COMPILE PIC: " << (image_header_.CompilePic() ? "yes" : "no") << "\n\n";
-
{
os << "ROOTS: " << reinterpret_cast<void*>(image_header_.GetImageRoots().Ptr()) << "\n";
static_assert(arraysize(image_roots_descriptions_) ==
@@ -1822,14 +1860,13 @@
oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(oat_location);
}
if (oat_file == nullptr) {
- oat_file = OatFile::Open(/* zip_fd */ -1,
+ oat_file = OatFile::Open(/*zip_fd=*/ -1,
oat_location,
oat_location,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg);
}
if (oat_file == nullptr) {
@@ -2440,7 +2477,7 @@
size_t bytes;
size_t count;
};
- typedef SafeMap<std::string, SizeAndCount> SizeAndCountTable;
+ using SizeAndCountTable = SafeMap<std::string, SizeAndCount>;
SizeAndCountTable sizes_and_counts;
void Update(const char* descriptor, size_t object_bytes_in) {
@@ -2720,14 +2757,13 @@
// We need to map the oat file in the low 4gb or else the fixup wont be able to fit oat file
// pointers into 32 bit pointer sized ArtMethods.
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
options->app_oat_,
options->app_oat_,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ true,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ true,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2844,14 +2880,13 @@
<< "oatdump might fail if the oat file does not contain the dex code.";
}
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_filename,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2870,14 +2905,13 @@
std::string& output_name,
bool no_bits) {
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_filename,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -2918,14 +2952,13 @@
if (oat_filename != nullptr) {
std::string error_msg;
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base */ nullptr,
- /* executable */ false,
- /*low_4gb*/false,
+ /*executable=*/ false,
+ /*low_4gb=*/false,
dex_filename,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
if (oat_file == nullptr) {
LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
@@ -3388,7 +3421,7 @@
} else if (option.starts_with("--export-dex-to=")) {
export_dex_location_ = option.substr(strlen("--export-dex-to=")).data();
} else if (option.starts_with("--addr2instr=")) {
- if (!ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
+ if (!android::base::ParseUint(option.substr(strlen("--addr2instr=")).data(), &addr2instr_)) {
*error_msg = "Address conversion failed";
return kParseError;
}
@@ -3431,7 +3464,7 @@
return kParseOk;
}
- virtual std::string GetUsage() const {
+ std::string GetUsage() const override {
std::string usage;
usage +=
@@ -3585,7 +3618,7 @@
}
}
- virtual bool ExecuteWithRuntime(Runtime* runtime) {
+ bool ExecuteWithRuntime(Runtime* runtime) override {
CHECK(args_ != nullptr);
if (!args_->imt_dump_.empty() || args_->imt_stat_dump_) {
diff --git a/oatdump/oatdump_app_test.cc b/oatdump/oatdump_app_test.cc
index a344286..2b04a0d 100644
--- a/oatdump/oatdump_app_test.cc
+++ b/oatdump/oatdump_app_test.cc
@@ -28,14 +28,4 @@
ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode));
}
-TEST_F(OatDumpTest, TestPicAppWithBootImage) {
- ASSERT_TRUE(GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}));
- ASSERT_TRUE(Exec(kDynamic, kModeOatWithBootImage, {}, kListAndCode));
-}
-TEST_F(OatDumpTest, TestPicAppWithBootImageStatic) {
- TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
- ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}));
- ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode));
-}
-
} // namespace art
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index bcba182..e6936f6 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -93,8 +93,8 @@
ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly));
const std::string dex_location = tmp_dir_+ "/core-oj-hostdex.jar_export.dex";
const std::string dexdump2 = GetExecutableFilePath("dexdump2",
- /*is_debug*/false,
- /*is_static*/false);
+ /*is_debug=*/false,
+ /*is_static=*/false);
std::string output;
auto post_fork_fn = []() { return true; };
ForkAndExecResult res = ForkAndExec({dexdump2, "-d", dex_location}, post_fork_fn, &output);
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 59f61e2..4bc33b6 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -313,10 +313,10 @@
return StackUtil::GetFrameCount(env, thread, count_ptr);
}
- static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ static jvmtiError PopFrame(jvmtiEnv* env, jthread thread) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_pop_frame);
- return ERR(NOT_IMPLEMENTED);
+ return StackUtil::PopFrame(env, thread);
}
static jvmtiError GetFrameLocation(jvmtiEnv* env,
@@ -506,13 +506,15 @@
static jvmtiError IterateOverInstancesOfClass(
jvmtiEnv* env,
- jclass klass ATTRIBUTE_UNUSED,
- jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
- jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
- const void* user_data ATTRIBUTE_UNUSED) {
+ jclass klass,
+ jvmtiHeapObjectFilter object_filter,
+ jvmtiHeapObjectCallback heap_object_callback,
+ const void* user_data) {
ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
- return ERR(NOT_IMPLEMENTED);
+ HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
+ return heap_util.IterateOverInstancesOfClass(
+ env, klass, object_filter, heap_object_callback, user_data);
}
static jvmtiError GetLocalObject(jvmtiEnv* env,
@@ -1193,7 +1195,7 @@
#undef ADD_CAPABILITY
gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
- /*added*/true);
+ /*added=*/true);
return ret;
}
@@ -1217,7 +1219,7 @@
#undef DEL_CAPABILITY
gEventHandler->HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
- /*added*/false);
+ /*added=*/false);
return OK;
}
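
A minimal agent-side sketch of driving the newly wired-up PopFrame entry point, assuming the agent already holds the can_suspend and can_pop_frame capabilities; the helper name and error handling below are illustrative only and are not part of this change:

#include <jvmti.h>

// Illustrative sketch only; not part of this patch. Pops the top Java frame of
// `target` from another thread. PopFrame requires the target to be suspended
// (here via a user-code SuspendThread) and rejects the calling thread itself.
static void DiscardTopFrame(jvmtiEnv* jvmti, jthread target) {
  if (jvmti->SuspendThread(target) != JVMTI_ERROR_NONE) {
    return;
  }
  jvmtiError err = jvmti->PopFrame(target);
  if (err != JVMTI_ERROR_NONE) {
    // e.g. JVMTI_ERROR_OPAQUE_FRAME for native frames, or
    // JVMTI_ERROR_NO_MORE_FRAMES when only one frame is on the stack.
  }
  // Once resumed, the caller's frame re-executes the invoke that created the
  // popped frame, matching the force-retry behavior set up in ti_stack.cc below.
  jvmti->ResumeThread(target);
}
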
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 82f3866..1218e3b 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -249,7 +249,7 @@
.can_get_owned_monitor_info = 1,
.can_get_current_contended_monitor = 1,
.can_get_monitor_info = 1,
- .can_pop_frame = 0,
+ .can_pop_frame = 1,
.can_redefine_classes = 1,
.can_signal_thread = 1,
.can_get_source_file_name = 1,
@@ -291,6 +291,7 @@
// can_retransform_classes:
// can_redefine_any_class:
// can_redefine_classes:
+// can_pop_frame:
// We need to ensure that inlined code is either not present or can always be deoptimized. This
// is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
// on a thread's stack.
@@ -303,7 +304,7 @@
.can_get_owned_monitor_info = 0,
.can_get_current_contended_monitor = 0,
.can_get_monitor_info = 0,
- .can_pop_frame = 0,
+ .can_pop_frame = 1,
.can_redefine_classes = 1,
.can_signal_thread = 0,
.can_get_source_file_name = 0,
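
With can_pop_frame now listed as a potential capability (and, per the comment above, withheld for non-debuggable processes), an agent requests it like any other capability, typically from Agent_OnLoad or Agent_OnAttach. A sketch; the function name is illustrative:

#include <string.h>

#include <jvmti.h>

// Illustrative sketch only; not part of this patch.
static jvmtiError RequestPopFrameCapability(jvmtiEnv* jvmti) {
  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_pop_frame = 1;
  // Returns JVMTI_ERROR_NOT_AVAILABLE if the capability cannot be granted,
  // e.g. in a process where it is not in the potential set.
  return jvmti->AddCapabilities(&caps);
}
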
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index d20c756..8bac38a 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -289,7 +289,7 @@
uninterruptible_cause_ = critical_section_.Enter(art::gc::kGcCauseInstrumentation,
art::gc::kCollectorTypeCriticalSection);
art::Runtime::Current()->GetThreadList()->SuspendAll("JVMTI Deoptimizing methods",
- /*long_suspend*/ false);
+ /*long_suspend=*/ false);
}
~ScopedDeoptimizationContext()
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index e98517f..ca66556 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -26,7 +26,9 @@
#include "jni/jni_internal.h"
#include "nativehelper/scoped_local_ref.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack.h"
#include "ti_breakpoint.h"
+#include "ti_thread.h"
#include "art_jvmti.h"
@@ -359,6 +361,7 @@
// have to deal with use-after-free or the frames being reallocated later.
art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
return env->notify_frames.erase(frame) != 0 &&
+ !frame->GetForcePopFrame() &&
ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
}
@@ -418,6 +421,67 @@
ExecuteCallback<ArtJvmtiEvent::kFramePop>(event, jnienv, jni_thread, jmethod, is_exception);
}
+struct ScopedDisablePopFrame {
+ public:
+ explicit ScopedDisablePopFrame(art::Thread* thread) : thread_(thread) {
+ art::Locks::mutator_lock_->AssertSharedHeld(thread_);
+ art::MutexLock mu(thread_, *art::Locks::thread_list_lock_);
+ JvmtiGlobalTLSData* data = ThreadUtil::GetOrCreateGlobalTLSData(thread_);
+ current_top_frame_ = art::StackVisitor::ComputeNumFrames(
+ thread_, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ old_disable_frame_pop_depth_ = data->disable_pop_frame_depth;
+ data->disable_pop_frame_depth = current_top_frame_;
+ DCHECK(old_disable_frame_pop_depth_ == JvmtiGlobalTLSData::kNoDisallowedPopFrame ||
+ current_top_frame_ > old_disable_frame_pop_depth_)
+ << "old: " << old_disable_frame_pop_depth_ << " current: " << current_top_frame_;
+ }
+
+ ~ScopedDisablePopFrame() {
+ art::Locks::mutator_lock_->AssertSharedHeld(thread_);
+ art::MutexLock mu(thread_, *art::Locks::thread_list_lock_);
+ JvmtiGlobalTLSData* data = ThreadUtil::GetGlobalTLSData(thread_);
+ DCHECK_EQ(data->disable_pop_frame_depth, current_top_frame_);
+ data->disable_pop_frame_depth = old_disable_frame_pop_depth_;
+ }
+
+ private:
+ art::Thread* thread_;
+ size_t current_top_frame_;
+ size_t old_disable_frame_pop_depth_;
+};
+// We want to prevent the use of PopFrame when reporting either of these events.
+template <ArtJvmtiEvent kEvent>
+inline void EventHandler::DispatchClassLoadOrPrepareEvent(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jclass klass) const {
+ ScopedDisablePopFrame sdpf(thread);
+ art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+ std::vector<impl::EventHandlerFunc<kEvent>> events = CollectEvents<kEvent>(thread,
+ jnienv,
+ jni_thread,
+ klass);
+
+ for (auto event : events) {
+ ExecuteCallback<kEvent>(event, jnienv, jni_thread, klass);
+ }
+}
+
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassLoad>(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jclass klass) const {
+ DispatchClassLoadOrPrepareEvent<ArtJvmtiEvent::kClassLoad>(thread, jnienv, jni_thread, klass);
+}
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kClassPrepare>(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jclass klass) const {
+ DispatchClassLoadOrPrepareEvent<ArtJvmtiEvent::kClassPrepare>(thread, jnienv, jni_thread, klass);
+}
+
// Need to give a custom specialization for NativeMethodBind since it has to deal with an out
// variable.
template <>
@@ -553,6 +617,7 @@
: ArtJvmtiEvent::kClassFileLoadHookRetransformable;
return (added && caps.can_access_local_variables == 1) ||
caps.can_generate_breakpoint_events == 1 ||
+ caps.can_pop_frame == 1 ||
(caps.can_retransform_classes == 1 &&
IsEventEnabledAnywhere(event) &&
env->event_masks.IsEnabledAnywhere(event));
@@ -573,6 +638,11 @@
if (caps.can_generate_breakpoint_events == 1) {
HandleBreakpointEventsChanged(added);
}
+ if (caps.can_pop_frame == 1 && added) {
+ // TODO We should keep track of how many of these have been enabled and remove it if there are
+ // no more possible users. This isn't expected to be too common.
+ art::Runtime::Current()->SetNonStandardExitsEnabled();
+ }
}
}
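
For reference, the save/set/restore contract that ScopedDisablePopFrame enforces on disable_pop_frame_depth can be modelled without ART's locking roughly as in the sketch below; the types and names here are stand-ins, not ART's:

#include <cstddef>
#include <limits>

// Illustrative sketch only; stand-in for the per-thread JVMTI TLS data with
// just the relevant field kept.
struct TlsDataModel {
  static constexpr size_t kNoDisallowedPopFrame = std::numeric_limits<size_t>::max();
  size_t disable_pop_frame_depth = kNoDisallowedPopFrame;
};

class ScopedDisablePopFrameModel {
 public:
  ScopedDisablePopFrameModel(TlsDataModel* tls, size_t current_depth)
      : tls_(tls), old_depth_(tls->disable_pop_frame_depth) {
    // Mark the frame dispatching ClassLoad/ClassPrepare as un-poppable.
    tls_->disable_pop_frame_depth = current_depth;
  }
  ~ScopedDisablePopFrameModel() {
    // Restore the previous depth so nested dispatches unwind correctly.
    tls_->disable_pop_frame_depth = old_depth_;
  }

 private:
  TlsDataModel* tls_;
  size_t old_depth_;
};
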
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 43d0b10..48df53a 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -517,7 +517,7 @@
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
val);
}
}
@@ -545,7 +545,7 @@
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_FALSE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
val);
}
}
@@ -572,7 +572,7 @@
self,
jnienv,
art::jni::EncodeArtMethod(method),
- /*was_popped_by_exception*/ static_cast<jboolean>(JNI_TRUE),
+ /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_TRUE),
val);
// Match RI behavior of just throwing away original exception if a new one is thrown.
if (LIKELY(!self->IsExceptionPending())) {
@@ -777,7 +777,7 @@
context.get(),
/*out*/ out_method,
/*out*/ dex_pc);
- clf.WalkStack(/* include_transitions */ false);
+ clf.WalkStack(/* include_transitions= */ false);
}
// Call-back when an exception is thrown.
@@ -793,8 +793,8 @@
FindCatchMethodsFromThrow(self, exception_object, &catch_method, &catch_pc);
uint32_t dex_pc = 0;
art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
- /* check_suspended */ true,
- /* abort_on_error */ art::kIsDebugBuild);
+ /* check_suspended= */ true,
+ /* abort_on_error= */ art::kIsDebugBuild);
ScopedLocalRef<jobject> exception(jnienv,
AddLocalRef<jobject>(jnienv, exception_object.Get()));
RunEventCallback<ArtJvmtiEvent::kException>(
@@ -819,8 +819,8 @@
art::JNIEnvExt* jnienv = self->GetJniEnv();
uint32_t dex_pc;
art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
- /* check_suspended */ true,
- /* abort_on_error */ art::kIsDebugBuild);
+ /* check_suspended= */ true,
+ /* abort_on_error= */ art::kIsDebugBuild);
ScopedLocalRef<jobject> exception(jnienv,
AddLocalRef<jobject>(jnienv, exception_object.Get()));
RunEventCallback<ArtJvmtiEvent::kExceptionCatch>(
@@ -843,16 +843,6 @@
return;
}
- // Call-back for when we get an invokevirtual or an invokeinterface.
- void InvokeVirtualOrInterface(art::Thread* self ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
- art::ArtMethod* caller ATTRIBUTE_UNUSED,
- uint32_t dex_pc ATTRIBUTE_UNUSED,
- art::ArtMethod* callee ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) override {
- return;
- }
-
private:
EventHandler* const event_handler_;
};
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index bf12cb1..9f91a08 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -301,6 +301,13 @@
unsigned char** new_class_data) const
REQUIRES(!envs_lock_);
+ template <ArtJvmtiEvent kEvent>
+ ALWAYS_INLINE inline void DispatchClassLoadOrPrepareEvent(art::Thread* thread,
+ JNIEnv* jnienv,
+ jthread jni_thread,
+ jclass klass) const
+ REQUIRES(!envs_lock_);
+
void HandleEventType(ArtJvmtiEvent event, bool enable);
void HandleLocalAccessCapabilityAdded();
void HandleBreakpointEventsChanged(bool enable);
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index 2ca87fd..6745d91 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -51,17 +51,6 @@
dex_file->CalculateChecksum();
}
-static void UnhideApis(const art::DexFile& target_dex_file) {
- for (art::ClassAccessor accessor : target_dex_file.GetClasses()) {
- for (const art::ClassAccessor::Field& field : accessor.GetFields()) {
- field.UnHideAccessFlags();
- }
- for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
- method.UnHideAccessFlags();
- }
- }
-}
-
static const art::VdexFile* GetVdex(const art::DexFile& original_dex_file) {
const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
if (oat_dex == nullptr) {
@@ -78,9 +67,11 @@
const art::DexFile& original_dex_file) {
const art::VdexFile* vdex = GetVdex(original_dex_file);
if (vdex != nullptr) {
- vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
+ vdex->UnquickenDexFile(new_dex_file,
+ original_dex_file,
+ /* decompile_return_instruction= */ true);
}
- UnhideApis(new_dex_file);
+ new_dex_file.UnhideApis();
}
static void DCheckVerifyDexFile(const art::DexFile& dex) {
@@ -90,7 +81,7 @@
dex.Begin(),
dex.Size(),
"FixedUpDexFile_Verification.dex",
- /*verify_checksum*/ true,
+ /*verify_checksum=*/ true,
&error)) {
LOG(FATAL) << "Failed to verify de-quickened dex file: " << error;
}
@@ -124,9 +115,9 @@
options.class_filter_.insert(descriptor);
}
art::DexLayout dex_layout(options,
- /*info*/ nullptr,
- /*out_file*/ nullptr,
- /*header*/ nullptr);
+ /*info=*/ nullptr,
+ /*out_file=*/ nullptr,
+ /*header=*/ nullptr);
std::unique_ptr<art::DexContainer> dex_container;
bool result = dex_layout.ProcessDexFile(
original.GetLocation().c_str(),
@@ -147,11 +138,11 @@
new_dex_file = dex_file_loader.Open(
data.data(),
data.size(),
- /*location*/"Unquickening_dexfile.dex",
- /*location_checksum*/0,
- /*oat_dex_file*/nullptr,
- /*verify*/false,
- /*verify_checksum*/false,
+ /*location=*/"Unquickening_dexfile.dex",
+ /*location_checksum=*/0,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
&error);
if (new_dex_file == nullptr) {
diff --git a/openjdkjvmti/object_tagging.cc b/openjdkjvmti/object_tagging.cc
index db67079..0a51bf2 100644
--- a/openjdkjvmti/object_tagging.cc
+++ b/openjdkjvmti/object_tagging.cc
@@ -36,6 +36,7 @@
#include "art_jvmti.h"
#include "events-inl.h"
#include "jvmti_weak_table-inl.h"
+#include "mirror/object-inl.h"
namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index f1d6fb0..3d33487 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -113,8 +113,8 @@
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
checksum,
std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
+ /*verify=*/true,
+ /*verify_checksum=*/true,
&error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
@@ -145,7 +145,7 @@
FakeJvmtiDeleter() {}
FakeJvmtiDeleter(FakeJvmtiDeleter&) = default;
- FakeJvmtiDeleter(FakeJvmtiDeleter&&) = default;
+ FakeJvmtiDeleter(FakeJvmtiDeleter&&) noexcept = default;
FakeJvmtiDeleter& operator=(const FakeJvmtiDeleter&) = default;
template <typename U> void operator()(const U* ptr) const {
@@ -267,7 +267,8 @@
}
}
- void ClassLoad(art::Handle<art::mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void ClassLoad(art::Handle<art::mirror::Class> klass) override
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassLoad)) {
art::Thread* thread = art::Thread::Current();
ScopedLocalRef<jclass> jklass(thread->GetJniEnv(),
@@ -289,7 +290,7 @@
void ClassPrepare(art::Handle<art::mirror::Class> temp_klass,
art::Handle<art::mirror::Class> klass)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (event_handler->IsEventEnabledAnywhere(ArtJvmtiEvent::kClassPrepare)) {
art::Thread* thread = art::Thread::Current();
if (temp_klass.Get() != klass.Get()) {
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 895e734..9e8288f 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -246,17 +246,17 @@
mmap_name += name_;
std::string error;
dex_data_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
dequick_size,
PROT_NONE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
&error);
mmap_name += "-TEMP";
temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
dequick_size,
PROT_READ | PROT_WRITE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
&error);
if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
// Need to save the initial dexfile so we don't need to search for it in the fault-handler.
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index c61d6e5..c628a32 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -424,7 +424,7 @@
}
}
return event_handler->SetEvent(art_env,
- /*event_thread*/nullptr,
+ /*thread=*/nullptr,
static_cast<ArtJvmtiEvent>(extension_event_index),
mode);
}
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 85aa946..559ee0d 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -653,6 +653,70 @@
art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
}
+jvmtiError HeapUtil::IterateOverInstancesOfClass(jvmtiEnv* env,
+ jclass klass,
+ jvmtiHeapObjectFilter filter,
+ jvmtiHeapObjectCallback cb,
+ const void* user_data) {
+ if (cb == nullptr || klass == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
+ art::StackHandleScope<1> hs(self);
+
+ art::ObjPtr<art::mirror::Object> klass_ptr(soa.Decode<art::mirror::Class>(klass));
+ if (!klass_ptr->IsClass()) {
+ return ERR(INVALID_CLASS);
+ }
+ art::Handle<art::mirror::Class> filter_klass(hs.NewHandle(klass_ptr->AsClass()));
+ if (filter_klass->IsInterface()) {
+ // Nothing is an 'instance' of an interface, so just return without walking anything.
+ return OK;
+ }
+
+ ObjectTagTable* tag_table = ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get();
+ bool stop_reports = false;
+ auto visitor = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Early return, as we can't really stop visiting.
+ if (stop_reports) {
+ return;
+ }
+
+ art::ScopedAssertNoThreadSuspension no_suspension("IterateOverInstancesOfClass");
+
+ art::ObjPtr<art::mirror::Class> klass = obj->GetClass();
+
+ if (filter_klass != nullptr && !filter_klass->IsAssignableFrom(klass)) {
+ return;
+ }
+
+ jlong tag = 0;
+ tag_table->GetTag(obj, &tag);
+ if ((filter != JVMTI_HEAP_OBJECT_EITHER) &&
+ ((tag == 0 && filter == JVMTI_HEAP_OBJECT_TAGGED) ||
+ (tag != 0 && filter == JVMTI_HEAP_OBJECT_UNTAGGED))) {
+ return;
+ }
+
+ jlong class_tag = 0;
+ tag_table->GetTag(klass.Ptr(), &class_tag);
+
+ jlong saved_tag = tag;
+ jint ret = cb(class_tag, obj->SizeOf(), &tag, const_cast<void*>(user_data));
+
+ stop_reports = (ret == JVMTI_ITERATION_ABORT);
+
+ if (tag != saved_tag) {
+ tag_table->Set(obj, tag);
+ }
+ };
+ art::Runtime::Current()->GetHeap()->VisitObjects(visitor);
+
+ return OK;
+}
+
template <typename T>
static jvmtiError DoIterateThroughHeap(T fn,
jvmtiEnv* env,
@@ -917,7 +981,9 @@
// TODO: We don't have this info.
if (thread != nullptr) {
ref_info->jni_local.depth = 0;
- art::ArtMethod* method = thread->GetCurrentMethod(nullptr, false /* abort_on_error */);
+ art::ArtMethod* method = thread->GetCurrentMethod(nullptr,
+ /* check_suspended= */ true,
+ /* abort_on_error= */ false);
if (method != nullptr) {
ref_info->jni_local.method = art::jni::EncodeArtMethod(method);
}
@@ -948,7 +1014,7 @@
ref_info->stack_local.slot = static_cast<jint>(java_info.GetVReg());
const art::StackVisitor* visitor = java_info.GetVisitor();
ref_info->stack_local.location =
- static_cast<jlocation>(visitor->GetDexPc(false /* abort_on_failure */));
+ static_cast<jlocation>(visitor->GetDexPc(/* abort_on_failure= */ false));
ref_info->stack_local.depth = static_cast<jint>(visitor->GetFrameDepth());
art::ArtMethod* method = visitor->GetMethod();
if (method != nullptr) {
@@ -1383,7 +1449,7 @@
}
jvmtiError HeapUtil::ForceGarbageCollection(jvmtiEnv* env ATTRIBUTE_UNUSED) {
- art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ false);
+ art::Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ false);
return ERR(NONE);
}
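
An agent-side counterpart of the IterateOverInstancesOfClass implementation above might look like the following sketch; it requires the can_tag_objects capability, and the names are examples only:

#include <jvmti.h>

// Illustrative sketch only; not part of this patch.
static jvmtiIterationControl JNICALL CountInstance(jlong class_tag,
                                                   jlong size,
                                                   jlong* tag_ptr,
                                                   void* user_data) {
  (void)class_tag;
  (void)size;
  (void)tag_ptr;
  ++*static_cast<size_t*>(user_data);
  // Returning JVMTI_ITERATION_ABORT here makes the visitor above stop reporting.
  return JVMTI_ITERATION_CONTINUE;
}

static size_t CountInstancesOf(jvmtiEnv* jvmti, jclass klass) {
  size_t count = 0;
  jvmti->IterateOverInstancesOfClass(klass,
                                     JVMTI_HEAP_OBJECT_EITHER,  // Tagged and untagged objects alike.
                                     CountInstance,
                                     &count);
  return count;
}
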
diff --git a/openjdkjvmti/ti_heap.h b/openjdkjvmti/ti_heap.h
index 62761b5..382d80f 100644
--- a/openjdkjvmti/ti_heap.h
+++ b/openjdkjvmti/ti_heap.h
@@ -30,6 +30,12 @@
jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr);
+ jvmtiError IterateOverInstancesOfClass(jvmtiEnv* env,
+ jclass klass,
+ jvmtiHeapObjectFilter filter,
+ jvmtiHeapObjectCallback cb,
+ const void* user_data);
+
jvmtiError IterateThroughHeap(jvmtiEnv* env,
jint heap_filter,
jclass klass,
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 1588df4..7d69c89 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -206,76 +206,59 @@
return ERR(ABSENT_INFORMATION);
}
- struct LocalVariableContext {
- explicit LocalVariableContext(jvmtiEnv* jenv) : env_(jenv), variables_(), err_(OK) {}
+ std::vector<jvmtiLocalVariableEntry> variables;
+ jvmtiError err = OK;
- static void Callback(void* raw_ctx, const art::DexFile::LocalInfo& entry) {
- reinterpret_cast<LocalVariableContext*>(raw_ctx)->Insert(entry);
+ auto release = [&](jint* out_entry_count_ptr, jvmtiLocalVariableEntry** out_table_ptr) {
+ jlong table_size = sizeof(jvmtiLocalVariableEntry) * variables.size();
+ if (err != OK ||
+ (err = env->Allocate(table_size,
+ reinterpret_cast<unsigned char**>(out_table_ptr))) != OK) {
+ for (jvmtiLocalVariableEntry& e : variables) {
+ env->Deallocate(reinterpret_cast<unsigned char*>(e.name));
+ env->Deallocate(reinterpret_cast<unsigned char*>(e.signature));
+ env->Deallocate(reinterpret_cast<unsigned char*>(e.generic_signature));
+ }
+ return err;
}
-
- void Insert(const art::DexFile::LocalInfo& entry) {
- if (err_ != OK) {
- return;
- }
- JvmtiUniquePtr<char[]> name_str = CopyString(env_, entry.name_, &err_);
- if (err_ != OK) {
- return;
- }
- JvmtiUniquePtr<char[]> sig_str = CopyString(env_, entry.descriptor_, &err_);
- if (err_ != OK) {
- return;
- }
- JvmtiUniquePtr<char[]> generic_sig_str = CopyString(env_, entry.signature_, &err_);
- if (err_ != OK) {
- return;
- }
- variables_.push_back({
- .start_location = static_cast<jlocation>(entry.start_address_),
- .length = static_cast<jint>(entry.end_address_ - entry.start_address_),
- .name = name_str.release(),
- .signature = sig_str.release(),
- .generic_signature = generic_sig_str.release(),
- .slot = entry.reg_,
- });
- }
-
- jvmtiError Release(jint* out_entry_count_ptr, jvmtiLocalVariableEntry** out_table_ptr) {
- jlong table_size = sizeof(jvmtiLocalVariableEntry) * variables_.size();
- if (err_ != OK ||
- (err_ = env_->Allocate(table_size,
- reinterpret_cast<unsigned char**>(out_table_ptr))) != OK) {
- Cleanup();
- return err_;
- } else {
- *out_entry_count_ptr = variables_.size();
- memcpy(*out_table_ptr, variables_.data(), table_size);
- return OK;
- }
- }
-
- void Cleanup() {
- for (jvmtiLocalVariableEntry& e : variables_) {
- env_->Deallocate(reinterpret_cast<unsigned char*>(e.name));
- env_->Deallocate(reinterpret_cast<unsigned char*>(e.signature));
- env_->Deallocate(reinterpret_cast<unsigned char*>(e.generic_signature));
- }
- }
-
- jvmtiEnv* env_;
- std::vector<jvmtiLocalVariableEntry> variables_;
- jvmtiError err_;
+ *out_entry_count_ptr = variables.size();
+ memcpy(*out_table_ptr, variables.data(), table_size);
+ return OK;
};
- LocalVariableContext context(env);
+ auto visitor = [&](const art::DexFile::LocalInfo& entry) {
+ if (err != OK) {
+ return;
+ }
+ JvmtiUniquePtr<char[]> name_str = CopyString(env, entry.name_, &err);
+ if (err != OK) {
+ return;
+ }
+ JvmtiUniquePtr<char[]> sig_str = CopyString(env, entry.descriptor_, &err);
+ if (err != OK) {
+ return;
+ }
+ JvmtiUniquePtr<char[]> generic_sig_str = CopyString(env, entry.signature_, &err);
+ if (err != OK) {
+ return;
+ }
+ variables.push_back({
+ .start_location = static_cast<jlocation>(entry.start_address_),
+ .length = static_cast<jint>(entry.end_address_ - entry.start_address_),
+ .name = name_str.release(),
+ .signature = sig_str.release(),
+ .generic_signature = generic_sig_str.release(),
+ .slot = entry.reg_,
+ });
+ };
+
if (!accessor.DecodeDebugLocalInfo(art_method->IsStatic(),
art_method->GetDexMethodIndex(),
- LocalVariableContext::Callback,
- &context)) {
+ visitor)) {
// Something went wrong with decoding the debug information. It might as well not be there.
return ERR(ABSENT_INFORMATION);
- } else {
- return context.Release(entry_count_ptr, table_ptr);
}
+ return release(entry_count_ptr, table_ptr);
}
jvmtiError MethodUtil::GetMaxLocals(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -446,16 +429,6 @@
return ERR(NONE);
}
-using LineNumberContext = std::vector<jvmtiLineNumberEntry>;
-
-static bool CollectLineNumbers(void* void_context, const art::DexFile::PositionInfo& entry) {
- LineNumberContext* context = reinterpret_cast<LineNumberContext*>(void_context);
- jvmtiLineNumberEntry jvmti_entry = { static_cast<jlocation>(entry.address_),
- static_cast<jint>(entry.line_) };
- context->push_back(jvmti_entry);
- return false; // Collect all, no early exit.
-}
-
jvmtiError MethodUtil::GetLineNumberTable(jvmtiEnv* env,
jmethodID method,
jint* entry_count_ptr,
@@ -486,9 +459,11 @@
DCHECK(accessor.HasCodeItem()) << art_method->PrettyMethod() << " " << dex_file->GetLocation();
}
- LineNumberContext context;
- bool success = dex_file->DecodeDebugPositionInfo(
- accessor.DebugInfoOffset(), CollectLineNumbers, &context);
+ std::vector<jvmtiLineNumberEntry> context;
+ bool success = accessor.DecodeDebugPositionInfo([&](const art::DexFile::PositionInfo& entry) {
+ context.push_back({static_cast<jlocation>(entry.address_), static_cast<jint>(entry.line_)});
+ return false;
+ });
if (!success) {
return ERR(ABSENT_INFORMATION);
}
@@ -572,7 +547,7 @@
return;
}
bool needs_instrument = !visitor.IsShadowFrame();
- uint32_t pc = visitor.GetDexPc(/*abort_on_failure*/ false);
+ uint32_t pc = visitor.GetDexPc(/*abort_on_failure=*/ false);
if (pc == art::dex::kDexNoIndex) {
// Cannot figure out current PC.
result_ = ERR(OPAQUE_FRAME);
@@ -622,55 +597,25 @@
if (!accessor.HasCodeItem()) {
return ERR(OPAQUE_FRAME);
}
-
- struct GetLocalVariableInfoContext {
- explicit GetLocalVariableInfoContext(jint slot,
- uint32_t pc,
- std::string* out_descriptor,
- art::Primitive::Type* out_type)
- : found_(false), jslot_(slot), pc_(pc), descriptor_(out_descriptor), type_(out_type) {
- *descriptor_ = "";
- *type_ = art::Primitive::kPrimVoid;
+ bool found = false;
+ *type = art::Primitive::kPrimVoid;
+ descriptor->clear();
+ auto visitor = [&](const art::DexFile::LocalInfo& entry) {
+ if (!found &&
+ entry.start_address_ <= dex_pc &&
+ entry.end_address_ > dex_pc &&
+ entry.reg_ == slot_) {
+ found = true;
+ *type = art::Primitive::GetType(entry.descriptor_[0]);
+ *descriptor = entry.descriptor_;
}
-
- static void Callback(void* raw_ctx, const art::DexFile::LocalInfo& entry) {
- reinterpret_cast<GetLocalVariableInfoContext*>(raw_ctx)->Handle(entry);
- }
-
- void Handle(const art::DexFile::LocalInfo& entry) {
- if (found_) {
- return;
- } else if (entry.start_address_ <= pc_ &&
- entry.end_address_ > pc_ &&
- entry.reg_ == jslot_) {
- found_ = true;
- *type_ = art::Primitive::GetType(entry.descriptor_[0]);
- *descriptor_ = entry.descriptor_;
- }
- return;
- }
-
- bool found_;
- jint jslot_;
- uint32_t pc_;
- std::string* descriptor_;
- art::Primitive::Type* type_;
};
-
- GetLocalVariableInfoContext context(slot_, dex_pc, descriptor, type);
- if (!dex_file->DecodeDebugLocalInfo(accessor.RegistersSize(),
- accessor.InsSize(),
- accessor.InsnsSizeInCodeUnits(),
- accessor.DebugInfoOffset(),
- method->IsStatic(),
- method->GetDexMethodIndex(),
- GetLocalVariableInfoContext::Callback,
- &context) || !context.found_) {
+ if (!accessor.DecodeDebugLocalInfo(method->IsStatic(), method->GetDexMethodIndex(), visitor) ||
+ !found) {
// Something went wrong with decoding the debug information. It might as well not be there.
return ERR(INVALID_SLOT);
- } else {
- return OK;
}
+ return OK;
}
jvmtiError result_;
@@ -689,7 +634,7 @@
val_(val),
obj_val_(nullptr) {}
- virtual jvmtiError GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ jvmtiError GetResult() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (result_ == OK && type_ == art::Primitive::kPrimNot) {
val_->l = obj_val_.IsNull()
? nullptr
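
The release lambda above hands ownership of every allocated string to the caller; the agent-side obligation is to Deallocate each string plus the table itself, roughly as in this sketch (assumes can_access_local_variables has been granted; names are illustrative):

#include <jvmti.h>

// Illustrative sketch only; not part of this patch.
static void DumpAndFreeLocalVariableTable(jvmtiEnv* jvmti, jmethodID method) {
  jint count = 0;
  jvmtiLocalVariableEntry* table = nullptr;
  if (jvmti->GetLocalVariableTable(method, &count, &table) != JVMTI_ERROR_NONE) {
    // e.g. JVMTI_ERROR_ABSENT_INFORMATION when the method has no debug info.
    return;
  }
  for (jint i = 0; i < count; ++i) {
    // ... inspect table[i].name, table[i].signature, table[i].slot here ...
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(table[i].name));
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(table[i].signature));
    jvmti->Deallocate(reinterpret_cast<unsigned char*>(table[i].generic_signature));
  }
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(table));
}
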
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 6d3a37e..aac7233 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -38,6 +38,7 @@
#include "art_jvmti.h"
#include "gc_root-inl.h"
+#include "mirror/object-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
@@ -190,7 +191,7 @@
// Reacquire the mutex/monitor, also go to sleep if we were suspended.
// TODO Give an extension to wait without suspension as well.
- MonitorEnter(self, /*suspend*/ true);
+ MonitorEnter(self, /*suspend=*/ true);
CHECK(owner_.load(std::memory_order_relaxed) == self);
DCHECK_EQ(1u, count_);
// Reset the count.
@@ -260,7 +261,7 @@
JvmtiMonitor* monitor = DecodeMonitor(id);
art::Thread* self = art::Thread::Current();
- monitor->MonitorEnter(self, /*suspend*/false);
+ monitor->MonitorEnter(self, /*suspend=*/false);
return ERR(NONE);
}
@@ -273,7 +274,7 @@
JvmtiMonitor* monitor = DecodeMonitor(id);
art::Thread* self = art::Thread::Current();
- monitor->MonitorEnter(self, /*suspend*/true);
+ monitor->MonitorEnter(self, /*suspend=*/true);
return ERR(NONE);
}
@@ -370,7 +371,7 @@
public:
GetContendedMonitorClosure() : out_(nullptr) {}
- void Run(art::Thread* target_thread) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* target_thread) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ScopedAssertNoThreadSuspension sants("GetContendedMonitorClosure::Run");
switch (target_thread->GetState()) {
// These three we are actually currently waiting on a monitor and have sent the appropriate
diff --git a/openjdkjvmti/ti_object.cc b/openjdkjvmti/ti_object.cc
index 89ce352..344ae88 100644
--- a/openjdkjvmti/ti_object.cc
+++ b/openjdkjvmti/ti_object.cc
@@ -92,7 +92,7 @@
{
art::ScopedObjectAccess soa(self); // Now we know we have the shared lock.
art::ScopedThreadSuspension sts(self, art::kNative);
- art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend*/false);
+ art::ScopedSuspendAll ssa("GetObjectMonitorUsage", /*long_suspend=*/false);
art::ObjPtr<art::mirror::Object> target(self->DecodeJObject(obj));
// This gets the list of threads trying to lock or wait on the monitor.
art::MonitorInfo info(target.Ptr());
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 2ec2f04..7525c02 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -152,7 +152,7 @@
const std::unordered_set<art::ArtMethod*>& obsoleted_methods,
ObsoleteMap* obsolete_maps)
: StackVisitor(thread,
- /*context*/nullptr,
+ /*context=*/nullptr,
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
allocator_(allocator),
obsoleted_methods_(obsoleted_methods),
@@ -305,10 +305,10 @@
std::string* error_msg) {
art::MemMap map = art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
- /* addr */ nullptr,
+ /* addr= */ nullptr,
data.size(),
PROT_READ|PROT_WRITE,
- /*low_4gb*/ false,
+ /*low_4gb=*/ false,
error_msg);
if (LIKELY(map.IsValid())) {
memcpy(map.Begin(), data.data(), data.size());
@@ -445,8 +445,8 @@
std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(name,
checksum,
std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
+ /*verify=*/true,
+ /*verify_checksum=*/true,
error_msg_));
if (dex_file.get() == nullptr) {
os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
@@ -1117,11 +1117,12 @@
dex_file_.get(),
hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
- dex_file_->GetClassDef(0), /*class_def*/
- nullptr, /*compiler_callbacks*/
- true, /*allow_soft_failures*/
- /*log_level*/
+ /*class_def=*/ dex_file_->GetClassDef(0),
+ /*callbacks=*/ nullptr,
+ /*allow_soft_failures=*/ true,
+ /*log_level=*/
art::verifier::HardFailLogMode::kLogWarning,
+ art::Runtime::Current()->GetTargetSdkVersion(),
&error);
switch (failure) {
case art::verifier::FailureKind::kNoFailure:
@@ -1287,7 +1288,7 @@
}
void Redefiner::ClassRedefinition::ReleaseDexFile() {
- dex_file_.release();
+ dex_file_.release(); // NOLINT b/117926937
}
void Redefiner::ReleaseAllDexFiles() {
@@ -1366,7 +1367,7 @@
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
- art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend=*/true);
for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
ClassRedefinition& redef = data.GetRedefinition();
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 1189b1d..427869e 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -229,8 +229,12 @@
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
const art::ArtDexFileLoader dex_file_loader;
- if (!dex_file_loader.Open(
- segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
+ if (!dex_file_loader.Open(segment,
+ segment,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
+ &error_msg,
+ &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index b6969af..1279f3b 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -77,9 +77,9 @@
start(start_),
stop(stop_) {}
GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
- GetStackTraceVisitor(GetStackTraceVisitor&&) = default;
+ GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
- bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
@@ -112,6 +112,23 @@
size_t stop;
};
+art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
+ art::ShadowFrame* cur = GetCurrentShadowFrame();
+ if (cur == nullptr) {
+ *created_frame = true;
+ art::ArtMethod* method = GetMethod();
+ const uint16_t num_regs = method->DexInstructionData().RegistersSize();
+ cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
+ num_regs,
+ method,
+ GetDexPc());
+ DCHECK(cur != nullptr);
+ } else {
+ *created_frame = false;
+ }
+ return cur;
+}
+
template <typename FrameFn>
GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
size_t start,
@@ -133,7 +150,7 @@
frames.push_back(info);
};
auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
start_result = visitor.start;
stop_result = visitor.stop;
@@ -201,7 +218,7 @@
++index;
};
auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
}
jvmtiFrameInfo* frame_buffer;
@@ -313,7 +330,7 @@
thread_frames->push_back(info);
};
auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
}
art::Barrier barrier;
@@ -662,7 +679,7 @@
: art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
count(0) {}
- bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* m = GetMethod();
const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
if (do_count) {
@@ -734,7 +751,7 @@
caller(nullptr),
caller_dex_pc(0) {}
- bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::ArtMethod* m = GetMethod();
const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
if (do_count) {
@@ -893,7 +910,7 @@
art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
// Find the monitors on the stack.
MonitorVisitor visitor(target);
- visitor.WalkStack(/* include_transitions */ false);
+ visitor.WalkStack(/* include_transitions= */ false);
// Find any other monitors, including ones acquired in native code.
art::RootInfo root_info(art::kRootVMInternal);
target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
@@ -1065,16 +1082,7 @@
// From here we are sure to succeed.
bool needs_instrument = false;
// Get/create a shadow frame
- art::ShadowFrame* shadow_frame = visitor.GetCurrentShadowFrame();
- if (shadow_frame == nullptr) {
- needs_instrument = true;
- const size_t frame_id = visitor.GetFrameId();
- const uint16_t num_regs = method->DexInstructionData().RegistersSize();
- shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
- num_regs,
- method,
- visitor.GetDexPc());
- }
+ art::ShadowFrame* shadow_frame = visitor.GetOrCreateShadowFrame(&needs_instrument);
{
art::WriterMutexLock lk(self, tienv->event_info_mutex_);
// Mark shadow frame as needs_notify_pop_
@@ -1089,4 +1097,88 @@
} while (true);
}
+jvmtiError StackUtil::PopFrame(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread thread) {
+ art::Thread* self = art::Thread::Current();
+ art::Thread* target;
+ do {
+ ThreadUtil::SuspendCheck(self);
+ art::MutexLock ucsl_mu(self, *art::Locks::user_code_suspension_lock_);
+ // Make sure we won't be suspended in the middle of holding the thread_suspend_count_lock_ by a
+ // user-code suspension. We retry and do another SuspendCheck to clear this.
+ if (ThreadUtil::WouldSuspendForUserCodeLocked(self)) {
+ continue;
+ }
+ // From now on we know we cannot get suspended by user-code.
+ // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+ // have the 'suspend_lock' locked here.
+ art::ScopedObjectAccess soa(self);
+ art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ jvmtiError err = ERR(INTERNAL);
+ if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+ return err;
+ }
+ {
+ art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (target == self || target->GetUserCodeSuspendCount() == 0) {
+ // We cannot be the current thread for this function.
+ return ERR(THREAD_NOT_SUSPENDED);
+ }
+ }
+ JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
+ constexpr art::StackVisitor::StackWalkKind kWalkKind =
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
+ if (tls_data != nullptr &&
+ tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
+ tls_data->disable_pop_frame_depth == art::StackVisitor::ComputeNumFrames(target,
+ kWalkKind)) {
+ LOG(WARNING) << "Disallowing frame pop due to in-progress class-load/prepare. Frame at depth "
+ << tls_data->disable_pop_frame_depth << " was marked as un-poppable by the "
+ << "jvmti plugin. See b/117615146 for more information.";
+ return ERR(OPAQUE_FRAME);
+ }
+ // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
+ // done.
+ std::unique_ptr<art::Context> context(art::Context::Create());
+ FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
+ FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
+ final_frame.WalkStack();
+ penultimate_frame.WalkStack();
+
+ if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
+ // Cannot do it if there is only one frame!
+ return ERR(NO_MORE_FRAMES);
+ }
+
+ art::ArtMethod* called_method = final_frame.GetMethod();
+ art::ArtMethod* calling_method = penultimate_frame.GetMethod();
+ if (calling_method->IsNative() || called_method->IsNative()) {
+ return ERR(OPAQUE_FRAME);
+ }
+ // From here we are sure to succeed.
+
+ // Get/create a shadow frame
+ bool created_final_frame = false;
+ bool created_penultimate_frame = false;
+ art::ShadowFrame* called_shadow_frame =
+ final_frame.GetOrCreateShadowFrame(&created_final_frame);
+ art::ShadowFrame* calling_shadow_frame =
+ penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);
+
+ CHECK_NE(called_shadow_frame, calling_shadow_frame)
+ << "Frames at different depths not different!";
+
+ // Tell the shadow-frame to return immediately and skip all exit events.
+ called_shadow_frame->SetForcePopFrame(true);
+ calling_shadow_frame->SetForceRetryInstruction(true);
+
+ // Make sure we will go to the interpreter and use the shadow frames. The early return for
+ // the final frame will force everything to the interpreter so we only need to instrument if it
+ // was not present.
+ if (created_final_frame) {
+ DeoptManager::Get()->DeoptimizeThread(target);
+ }
+ return OK;
+ } while (true);
+}
+
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.h b/openjdkjvmti/ti_stack.h
index b41fa4b..55c4269 100644
--- a/openjdkjvmti/ti_stack.h
+++ b/openjdkjvmti/ti_stack.h
@@ -81,6 +81,8 @@
jobject** owned_monitors_ptr);
static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth);
+
+ static jvmtiError PopFrame(jvmtiEnv* env, jthread thread);
};
struct FindFrameAtDepthVisitor : art::StackVisitor {
@@ -110,6 +112,9 @@
}
}
+ art::ShadowFrame* GetOrCreateShadowFrame(/*out*/bool* created_frame)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
bool found_frame_;
size_t cnt_;
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index e533094..2131120 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -623,18 +623,10 @@
return ERR(NONE);
}
-// The struct that we store in the art::Thread::custom_tls_ that maps the jvmtiEnvs to the data
-// stored with that thread. This is needed since different jvmtiEnvs are not supposed to share TLS
-// data but we only have a single slot in Thread objects to store data.
-struct JvmtiGlobalTLSData : public art::TLSData {
- std::unordered_map<jvmtiEnv*, const void*> data GUARDED_BY(art::Locks::thread_list_lock_);
-};
-
static void RemoveTLSData(art::Thread* target, void* ctx) REQUIRES(art::Locks::thread_list_lock_) {
jvmtiEnv* env = reinterpret_cast<jvmtiEnv*>(ctx);
art::Locks::thread_list_lock_->AssertHeld(art::Thread::Current());
- JvmtiGlobalTLSData* global_tls =
- reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
+ JvmtiGlobalTLSData* global_tls = ThreadUtil::GetGlobalTLSData(target);
if (global_tls != nullptr) {
global_tls->data.erase(env);
}
@@ -657,19 +649,27 @@
return err;
}
- JvmtiGlobalTLSData* global_tls =
- reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
- if (global_tls == nullptr) {
- // Synchronized using thread_list_lock_ to prevent racing sets.
- target->SetCustomTLS(kJvmtiTlsKey, new JvmtiGlobalTLSData);
- global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
- }
+ JvmtiGlobalTLSData* global_tls = GetOrCreateGlobalTLSData(target);
global_tls->data[env] = data;
return ERR(NONE);
}
+JvmtiGlobalTLSData* ThreadUtil::GetOrCreateGlobalTLSData(art::Thread* thread) {
+ JvmtiGlobalTLSData* data = GetGlobalTLSData(thread);
+ if (data != nullptr) {
+ return data;
+ } else {
+ thread->SetCustomTLS(kJvmtiTlsKey, new JvmtiGlobalTLSData);
+ return GetGlobalTLSData(thread);
+ }
+}
+
+JvmtiGlobalTLSData* ThreadUtil::GetGlobalTLSData(art::Thread* thread) {
+ return reinterpret_cast<JvmtiGlobalTLSData*>(thread->GetCustomTLS(kJvmtiTlsKey));
+}
+
jvmtiError ThreadUtil::GetThreadLocalStorage(jvmtiEnv* env,
jthread thread,
void** data_ptr) {
@@ -686,8 +686,7 @@
return err;
}
- JvmtiGlobalTLSData* global_tls =
- reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
+ JvmtiGlobalTLSData* global_tls = GetGlobalTLSData(target);
if (global_tls == nullptr) {
*data_ptr = nullptr;
return OK;
@@ -813,42 +812,11 @@
runtime->EndThreadBirth();
return ERR(INTERNAL);
}
- data.release();
+ data.release(); // NOLINT pthreads API.
return ERR(NONE);
}
-class ScopedSuspendByPeer {
- public:
- explicit ScopedSuspendByPeer(jthread jtarget)
- : thread_list_(art::Runtime::Current()->GetThreadList()),
- timeout_(false),
- target_(thread_list_->SuspendThreadByPeer(jtarget,
- /* suspend_thread */ true,
- art::SuspendReason::kInternal,
- &timeout_)) { }
- ~ScopedSuspendByPeer() {
- if (target_ != nullptr) {
- if (!thread_list_->Resume(target_, art::SuspendReason::kInternal)) {
- LOG(ERROR) << "Failed to resume " << target_ << "!";
- }
- }
- }
-
- art::Thread* GetTargetThread() const {
- return target_;
- }
-
- bool TimedOut() const {
- return timeout_;
- }
-
- private:
- art::ThreadList* thread_list_;
- bool timeout_;
- art::Thread* target_;
-};
-
jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
jthread target_jthread) {
// Loop since we need to bail out and try again if we would end up getting suspended while holding
@@ -876,27 +844,29 @@
if (!GetAliveNativeThread(target_jthread, soa, &target, &err)) {
return err;
}
+ art::ThreadState state = target->GetState();
+ if (state == art::ThreadState::kStarting || target->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ } else {
+ art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (target->GetUserCodeSuspendCount() != 0) {
+ return ERR(THREAD_SUSPENDED);
+ }
+ }
}
- // Get the actual thread in a suspended state so we can change the user-code suspend count.
- ScopedSuspendByPeer ssbp(target_jthread);
- if (ssbp.GetTargetThread() == nullptr && !ssbp.TimedOut()) {
+ bool timeout = true;
+ art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
+ target_jthread,
+ /* request_suspension= */ true,
+ art::SuspendReason::kForUserCode,
+ &timeout);
+ if (ret_target == nullptr && !timeout) {
// TODO It would be good to get more information about why exactly the thread failed to
// suspend.
return ERR(INTERNAL);
- } else if (!ssbp.TimedOut()) {
- art::ThreadState state = ssbp.GetTargetThread()->GetState();
- if (state == art::ThreadState::kStarting || ssbp.GetTargetThread()->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- }
- // we didn't time out and got a result. Suspend the thread by usercode and return. It's
- // already suspended internal so we don't need to do anything but increment the count.
- art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() != 0) {
- return ERR(THREAD_SUSPENDED);
- }
- bool res = ssbp.GetTargetThread()->ModifySuspendCount(
- self, +1, nullptr, art::SuspendReason::kForUserCode);
- return res ? OK : ERR(INTERNAL);
+ } else if (!timeout) {
+ // we didn't time out and got a result.
+ return OK;
}
// We timed out. Just go around and try again.
} while (true);
@@ -905,17 +875,6 @@
jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
CHECK(self == art::Thread::Current());
- if (!self->CanBeSuspendedByUserCode()) {
- // TODO This is really undesirable. As far as I can tell this is can only come about because of
- // class-loads in the jit-threads (through either VMObjectAlloc or the ClassLoad/ClassPrepare
- // events that we send). It's unlikely that anyone would be suspending themselves there since
- // it's almost guaranteed to cause a deadlock but it is technically allowed. Ideally we'd want
- // to put a CHECK here (or in the event-dispatch code) that we are only in this situation when
- // sending the GC callbacks but the jit causing events means we cannot do this.
- LOG(WARNING) << "Attempt to self-suspend on a thread without suspension enabled. Thread is "
- << *self;
- return ERR(INTERNAL);
- }
{
art::MutexLock mu(self, *art::Locks::user_code_suspension_lock_);
art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
@@ -963,6 +922,7 @@
return ERR(NULL_POINTER);
}
art::Thread* self = art::Thread::Current();
+ art::Thread* target;
// Retry until we know we won't get suspended by user code while resuming something.
do {
SuspendCheck(self);
@@ -973,37 +933,36 @@
continue;
}
// From now on we know we cannot get suspended by user-code.
- // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
- // have the 'suspend_lock' locked here.
- art::ScopedObjectAccess soa(self);
- if (thread == nullptr) {
- // The thread is the current thread.
- return ERR(THREAD_NOT_SUSPENDED);
- } else if (!soa.Env()->IsInstanceOf(thread, art::WellKnownClasses::java_lang_Thread)) {
- // Not a thread object.
- return ERR(INVALID_THREAD);
- } else if (self->GetPeer() == soa.Decode<art::mirror::Object>(thread)) {
- // The thread is the current thread.
- return ERR(THREAD_NOT_SUSPENDED);
+ {
+ // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+ // have the 'suspend_lock' locked here.
+ art::ScopedObjectAccess soa(self);
+ art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+ jvmtiError err = ERR(INTERNAL);
+ if (!GetAliveNativeThread(thread, soa, &target, &err)) {
+ return err;
+ } else if (target == self) {
+ // We would have paused until we aren't suspended anymore due to the ScopedObjectAccess so
+ // we can just return THREAD_NOT_SUSPENDED. Unfortunately we cannot do any real DCHECKs
+ // about current state since it's all concurrent.
+ return ERR(THREAD_NOT_SUSPENDED);
+ }
+ // The JVMTI spec requires us to return THREAD_NOT_SUSPENDED if it is alive but we really
+ // cannot tell why resume failed.
+ {
+ art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (target->GetUserCodeSuspendCount() == 0) {
+ return ERR(THREAD_NOT_SUSPENDED);
+ }
+ }
}
- ScopedSuspendByPeer ssbp(thread);
- if (ssbp.TimedOut()) {
- // Unknown error. Couldn't suspend thread!
- return ERR(INTERNAL);
- } else if (ssbp.GetTargetThread() == nullptr) {
- // Thread must not be alive.
- return ERR(THREAD_NOT_ALIVE);
- }
- // We didn't time out and got a result. Check the thread is suspended by usercode, unsuspend it
- // and return. It's already suspended internal so we don't need to do anything but decrement the
- // count.
- art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() == 0) {
- return ERR(THREAD_NOT_SUSPENDED);
- } else if (!ssbp.GetTargetThread()->ModifySuspendCount(
- self, -1, nullptr, art::SuspendReason::kForUserCode)) {
+ // It is okay that we don't have a thread_list_lock here since we know that the thread cannot
+ // die since it is currently held suspended by a SuspendReason::kForUserCode suspend.
+ DCHECK(target != self);
+ if (!art::Runtime::Current()->GetThreadList()->Resume(target,
+ art::SuspendReason::kForUserCode)) {
// TODO Give a better error.
- // This should not really be possible and is probably some race.
+ // This is most likely THREAD_NOT_SUSPENDED but we cannot really be sure.
return ERR(INTERNAL);
} else {
return OK;
@@ -1110,7 +1069,7 @@
public:
explicit StopThreadClosure(art::Handle<art::mirror::Throwable> except) : exception_(except) { }
- void Run(art::Thread* me) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void Run(art::Thread* me) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Make sure the thread is prepared to notice the exception.
art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(me);
me->SetAsyncException(exception_.Get());
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index c6b6af1..39f1f07 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -32,11 +32,14 @@
#ifndef ART_OPENJDKJVMTI_TI_THREAD_H_
#define ART_OPENJDKJVMTI_TI_THREAD_H_
+#include <unordered_map>
+
#include "jni.h"
#include "jvmti.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "thread.h"
namespace art {
class ArtField;
@@ -49,6 +52,18 @@
class EventHandler;
+// The struct that we store in the art::Thread::custom_tls_ that maps the jvmtiEnvs to the data
+// stored with that thread. This is needed since different jvmtiEnvs are not supposed to share TLS
+// data but we only have a single slot in Thread objects to store data.
+struct JvmtiGlobalTLSData : public art::TLSData {
+ std::unordered_map<jvmtiEnv*, const void*> data GUARDED_BY(art::Locks::thread_list_lock_);
+
+ // The depth of the last frame where popping frames using PopFrame is not allowed. It is set to
+ // kNoDisallowedPopFrame if all frames can be popped. See b/117615146 for more information.
+ static constexpr size_t kNoDisallowedPopFrame = -1;
+ size_t disable_pop_frame_depth = kNoDisallowedPopFrame;
+};
+
class ThreadUtil {
public:
static void Register(EventHandler* event_handler);
@@ -134,6 +149,11 @@
REQUIRES(!art::Locks::user_code_suspension_lock_,
!art::Locks::thread_suspend_count_lock_);
+ static JvmtiGlobalTLSData* GetGlobalTLSData(art::Thread* thread)
+ REQUIRES(art::Locks::thread_list_lock_);
+ static JvmtiGlobalTLSData* GetOrCreateGlobalTLSData(art::Thread* thread)
+ REQUIRES(art::Locks::thread_list_lock_);
+
private:
// We need to make sure only one thread tries to suspend threads at a time so we can get the
// 'suspend-only-once' behavior the spec requires. Internally, ART considers suspension to be a
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index d87ca56..653f944 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -76,7 +76,7 @@
art::LockLevel::kSignalHandlingLock),
class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
uninitialized_class_definitions_lock_) {
- manager->AddHandler(this, /* generated_code */ false);
+ manager->AddHandler(this, /* generated_code= */ false);
}
~TransformationFaultHandler() {
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
deleted file mode 100644
index 13c8f47..0000000
--- a/patchoat/Android.bp
+++ /dev/null
@@ -1,64 +0,0 @@
-//
-// Copyright (C) 2014 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-cc_defaults {
- name: "patchoat-defaults",
- host_supported: true,
- defaults: ["art_defaults"],
- srcs: ["patchoat.cc"],
- target: {
- android: {
- compile_multilib: "prefer32",
- },
- },
- shared_libs: [
- "libartbase",
- "libbase",
- "libcrypto", // For computing the digest of image file
- ],
-}
-
-art_cc_binary {
- name: "patchoat",
- defaults: ["patchoat-defaults"],
- shared_libs: [
- "libart",
- ],
-}
-
-art_cc_binary {
- name: "patchoatd",
- defaults: [
- "art_debug_defaults",
- "patchoat-defaults",
- ],
- shared_libs: [
- "libartd",
- ],
-}
-
-art_cc_test {
- name: "art_patchoat_tests",
- defaults: [
- "art_gtest_defaults",
- ],
- srcs: [
- "patchoat_test.cc",
- ],
- shared_libs: [
- "libcrypto", // For computing the digest of image file
- ],
-}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
deleted file mode 100644
index 02fc925..0000000
--- a/patchoat/patchoat.cc
+++ /dev/null
@@ -1,1350 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "patchoat.h"
-
-#include <openssl/sha.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/file.h"
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "art_field-inl.h"
-#include "art_method-inl.h"
-#include "base/bit_memory_region.h"
-#include "base/dumpable.h"
-#include "base/file_utils.h"
-#include "base/leb128.h"
-#include "base/logging.h" // For InitLogging.
-#include "base/mutex.h"
-#include "base/memory_region.h"
-#include "base/memory_tool.h"
-#include "base/os.h"
-#include "base/scoped_flock.h"
-#include "base/stringpiece.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/utils.h"
-#include "class_root.h"
-#include "elf_file.h"
-#include "elf_file_impl.h"
-#include "elf_utils.h"
-#include "gc/space/image_space.h"
-#include "image-inl.h"
-#include "intern_table.h"
-#include "mirror/dex_cache.h"
-#include "mirror/executable.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "mirror/object-refvisitor-inl.h"
-#include "mirror/reference.h"
-#include "noop_compiler_callbacks.h"
-#include "offsets.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-namespace {
-
-static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
- uint64_t off = 0;
- if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
- return nullptr;
- }
-
- OatHeader* oat_header = reinterpret_cast<OatHeader*>(elf_file->Begin() + off);
- return oat_header;
-}
-
-static File* CreateOrOpen(const char* name) {
- if (OS::FileExists(name)) {
- return OS::OpenFileReadWrite(name);
- } else {
- std::unique_ptr<File> f(OS::CreateEmptyFile(name));
- if (f.get() != nullptr) {
- if (fchmod(f->Fd(), 0644) != 0) {
- PLOG(ERROR) << "Unable to make " << name << " world readable";
- unlink(name);
- return nullptr;
- }
- }
- return f.release();
- }
-}
-
-// Either try to close the file (close=true), or erase it.
-static bool FinishFile(File* file, bool close) {
- if (close) {
- if (file->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close file.";
- return false;
- }
- return true;
- } else {
- file->Erase();
- return false;
- }
-}
-
-static bool SymlinkFile(const std::string& input_filename, const std::string& output_filename) {
- if (input_filename == output_filename) {
- // Input and output are the same, nothing to do.
- return true;
- }
-
- // Unlink the original filename, since we are overwriting it.
- unlink(output_filename.c_str());
-
- // Create a symlink from the source file to the target path.
- if (symlink(input_filename.c_str(), output_filename.c_str()) < 0) {
- PLOG(ERROR) << "Failed to create symlink " << output_filename << " -> " << input_filename;
- return false;
- }
-
- if (kIsDebugBuild) {
- LOG(INFO) << "Created symlink " << output_filename << " -> " << input_filename;
- }
-
- return true;
-}
-
-// Holder class for runtime options and related objects.
-class PatchoatRuntimeOptionsHolder {
- public:
- PatchoatRuntimeOptionsHolder(const std::string& image_location, InstructionSet isa) {
- options_.push_back(std::make_pair("compilercallbacks", &callbacks_));
- img_ = "-Ximage:" + image_location;
- options_.push_back(std::make_pair(img_.c_str(), nullptr));
- isa_name_ = GetInstructionSetString(isa);
- options_.push_back(std::make_pair("imageinstructionset",
- reinterpret_cast<const void*>(isa_name_.c_str())));
- options_.push_back(std::make_pair("-Xno-sig-chain", nullptr));
- // We do not want the runtime to attempt to patch the image.
- options_.push_back(std::make_pair("-Xnorelocate", nullptr));
- // Don't try to compile.
- options_.push_back(std::make_pair("-Xnoimage-dex2oat", nullptr));
- // Do not accept broken image.
- options_.push_back(std::make_pair("-Xno-dex-file-fallback", nullptr));
- }
-
- const RuntimeOptions& GetRuntimeOptions() {
- return options_;
- }
-
- private:
- RuntimeOptions options_;
- NoopCompilerCallbacks callbacks_;
- std::string isa_name_;
- std::string img_;
-};
-
-} // namespace
-
-bool PatchOat::GeneratePatch(
- const MemMap& original,
- const MemMap& relocated,
- std::vector<uint8_t>* output,
- std::string* error_msg) {
- // FORMAT of the patch (aka image relocation) file:
- // * SHA-256 digest (32 bytes) of original/unrelocated file (e.g., the one from /system)
- // * List of monotonically increasing offsets (max value defined by uint32_t) at which relocations
- // occur.
- // Each element is represented as the delta from the previous offset in the list (first element
- // is a delta from 0). Each delta is encoded using unsigned LEB128: little-endian
- // variable-length 7 bits per byte encoding, where all bytes have the highest bit (0x80) set
- // except for the final byte which does not have that bit set. For example, 0x3f is offset 0x3f,
- // whereas 0xbf 0x05 is offset (0x3f & 0x7f) | (0x5 << 7) which is 0x2bf. Most deltas end up
- // being encoded using just one byte, achieving a ~4x decrease in relocation file size compared
- // to the encoding where offsets are stored verbatim as uint32_t (a standalone sketch of this
- // encoding is given right after this function).
-
- size_t original_size = original.Size();
- size_t relocated_size = relocated.Size();
- if (original_size != relocated_size) {
- *error_msg =
- StringPrintf(
- "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
- return false;
- }
- if (original_size > UINT32_MAX) {
- *error_msg = StringPrintf("Image too large: %zu" , original_size);
- return false;
- }
-
- const ImageHeader& relocated_header =
- *reinterpret_cast<const ImageHeader*>(relocated.Begin());
- // Offsets are supposed to differ between original and relocated by this value
- off_t expected_diff = relocated_header.GetPatchDelta();
- if (expected_diff == 0) {
- // Can't identify offsets which are supposed to differ due to relocation
- *error_msg = "Relocation delta is 0";
- return false;
- }
-
- const ImageHeader* image_header = reinterpret_cast<const ImageHeader*>(original.Begin());
- if (image_header->GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
- *error_msg = "Unexpected compressed image.";
- return false;
- }
- if (image_header->IsAppImage()) {
- *error_msg = "Unexpected app image.";
- return false;
- }
- if (image_header->GetPointerSize() != PointerSize::k32 &&
- image_header->GetPointerSize() != PointerSize::k64) {
- *error_msg = "Unexpected pointer size.";
- return false;
- }
- static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
- "Expecting heap GC roots and references to have the same size.");
- DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(image_header->GetPointerSize()));
-
- const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
- kPageSize);
- const size_t end_of_bitmap = image_bitmap_offset + image_header->GetImageBitmapSection().Size();
- const ImageSection& relocation_section = image_header->GetImageRelocationsSection();
- MemoryRegion relocations_data(original.Begin() + end_of_bitmap, relocation_section.Size());
- size_t image_end = image_header->GetClassTableSection().End();
- if (!IsAligned<sizeof(GcRoot<mirror::Object>)>(image_end)) {
- *error_msg = StringPrintf("Unaligned image end: %zu", image_end);
- return false;
- }
- size_t num_indexes = image_end / sizeof(GcRoot<mirror::Object>);
- if (relocation_section.Size() != BitsToBytesRoundUp(num_indexes)) {
- *error_msg = StringPrintf("Unexpected size of relocation section: %zu expected: %zu",
- static_cast<size_t>(relocation_section.Size()),
- BitsToBytesRoundUp(num_indexes));
- return false;
- }
- BitMemoryRegion relocation_bitmap(relocations_data, /* bit_offset */ 0u, num_indexes);
-
- // Output the SHA-256 digest of the original
- output->resize(SHA256_DIGEST_LENGTH);
- const uint8_t* original_bytes = original.Begin();
- SHA256(original_bytes, original_size, output->data());
-
- // Check the list of offsets at which the original and patched images differ.
- size_t diff_offset_count = 0;
- const uint8_t* relocated_bytes = relocated.Begin();
- for (size_t index = 0; index != num_indexes; ++index) {
- size_t offset = index * sizeof(GcRoot<mirror::Object>);
- uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
- uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
- off_t diff = relocated_value - original_value;
- if (diff == 0) {
- CHECK(!relocation_bitmap.LoadBit(index));
- continue;
- } else if (diff != expected_diff) {
- *error_msg =
- StringPrintf(
- "Unexpected diff at offset %zu. Expected: %jd, but was: %jd",
- offset,
- (intmax_t) expected_diff,
- (intmax_t) diff);
- return false;
- }
- CHECK(relocation_bitmap.LoadBit(index));
- diff_offset_count++;
- }
- size_t tail_bytes = original_size - image_end;
- CHECK_EQ(memcmp(original_bytes + image_end, relocated_bytes + image_end, tail_bytes), 0);
-
- if (diff_offset_count == 0) {
- *error_msg = "Original and patched images are identical";
- return false;
- }
-
- return true;
-}
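The comment at the top of GeneratePatch documents the relocation (".rel") offsets as unsigned LEB128-encoded deltas. A minimal standalone sketch of that encoding follows — it is not part of the deleted patchoat sources, and the helper names are invented for illustration:

  // Sketch only: ULEB128 encoding/decoding of relocation offset deltas, as
  // described in the GeneratePatch comment above. Assumes well-formed input.
  #include <cstdint>
  #include <vector>

  // Emits |value| 7 bits at a time, low bits first; every byte except the
  // last has the continuation bit (0x80) set.
  static void EncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
    do {
      uint8_t byte = value & 0x7fu;
      value >>= 7;
      out->push_back(value != 0u ? static_cast<uint8_t>(byte | 0x80u) : byte);
    } while (value != 0u);
  }

  // Reads one ULEB128 value starting at *data and advances the pointer.
  static uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
    uint32_t result = 0u;
    uint32_t shift = 0u;
    uint8_t byte;
    do {
      byte = *(*data)++;
      result |= static_cast<uint32_t>(byte & 0x7fu) << shift;
      shift += 7u;
    } while ((byte & 0x80u) != 0u);
    return result;
  }

Encoding the delta 0x2bf from the worked example above yields the bytes 0xbf 0x05: the low seven bits (0x3f) are emitted with the continuation bit (0x80) set, and the remaining bits (0x05) form the final byte.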
-
-static bool WriteRelFile(
- const MemMap& original,
- const MemMap& relocated,
- const std::string& rel_filename,
- std::string* error_msg) {
- std::vector<uint8_t> output;
- if (!PatchOat::GeneratePatch(original, relocated, &output, error_msg)) {
- return false;
- }
-
- std::unique_ptr<File> rel_file(OS::CreateEmptyFileWriteOnly(rel_filename.c_str()));
- if (rel_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to create/open output file %s", rel_filename.c_str());
- return false;
- }
- if (!rel_file->WriteFully(output.data(), output.size())) {
- *error_msg = StringPrintf("Failed to write to %s", rel_filename.c_str());
- return false;
- }
- if (rel_file->FlushCloseOrErase() != 0) {
- *error_msg = StringPrintf("Failed to flush and close %s", rel_filename.c_str());
- return false;
- }
-
- return true;
-}
-
-static bool CheckImageIdenticalToOriginalExceptForRelocation(
- const std::string& relocated_filename,
- const std::string& original_filename,
- std::string* error_msg) {
- *error_msg = "";
- std::string rel_filename = original_filename + ".rel";
- std::unique_ptr<File> rel_file(OS::OpenFileForReading(rel_filename.c_str()));
- if (rel_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open image relocation file %s", rel_filename.c_str());
- return false;
- }
- int64_t rel_size = rel_file->GetLength();
- if (rel_size < 0) {
- *error_msg = StringPrintf("Error while getting size of image relocation file %s",
- rel_filename.c_str());
- return false;
- }
- if (rel_size != SHA256_DIGEST_LENGTH) {
- *error_msg = StringPrintf("Unexpected size of image relocation file %s: %" PRId64
- ", expected %zu",
- rel_filename.c_str(),
- rel_size,
- static_cast<size_t>(SHA256_DIGEST_LENGTH));
- return false;
- }
- std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
- if (!rel_file->ReadFully(rel.get(), rel_size)) {
- *error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
- return false;
- }
-
- std::unique_ptr<File> image_file(OS::OpenFileForReading(relocated_filename.c_str()));
- if (image_file.get() == nullptr) {
- *error_msg = StringPrintf("Unable to open relocated image file %s",
- relocated_filename.c_str());
- return false;
- }
-
- int64_t image_size = image_file->GetLength();
- if (image_size < 0) {
- *error_msg = StringPrintf("Error while getting size of relocated image file %s",
- relocated_filename.c_str());
- return false;
- }
- if (static_cast<uint64_t>(image_size) < sizeof(ImageHeader)) {
- *error_msg =
- StringPrintf(
- "Relocated image file %s too small: %" PRId64,
- relocated_filename.c_str(), image_size);
- return false;
- }
- if (image_size > std::numeric_limits<uint32_t>::max()) {
- *error_msg =
- StringPrintf(
- "Relocated image file %s too large: %" PRId64, relocated_filename.c_str(), image_size);
- return false;
- }
-
- std::unique_ptr<uint8_t[]> image(new uint8_t[image_size]);
- if (!image_file->ReadFully(image.get(), image_size)) {
- *error_msg = StringPrintf("Failed to read relocated image file %s", relocated_filename.c_str());
- return false;
- }
-
- const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
- if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
- *error_msg = StringPrintf("Unsuported compressed image file %s",
- relocated_filename.c_str());
- return false;
- }
- size_t image_end = image_header.GetClassTableSection().End();
- if (image_end > static_cast<uint64_t>(image_size) || !IsAligned<4u>(image_end)) {
- *error_msg = StringPrintf("Heap size too big or unaligned in image file %s: %zu",
- relocated_filename.c_str(),
- image_end);
- return false;
- }
- size_t number_of_relocation_locations = image_end / 4u;
- const ImageSection& relocation_section = image_header.GetImageRelocationsSection();
- if (relocation_section.Size() != BitsToBytesRoundUp(number_of_relocation_locations)) {
- *error_msg = StringPrintf("Unexpected size of relocation section in image file %s: %zu"
- " expected: %zu",
- relocated_filename.c_str(),
- static_cast<size_t>(relocation_section.Size()),
- BitsToBytesRoundUp(number_of_relocation_locations));
- return false;
- }
- if (relocation_section.End() != image_size) {
- *error_msg = StringPrintf("Relocation section does not end at file end in image file %s: %zu"
- " expected: %" PRId64,
- relocated_filename.c_str(),
- static_cast<size_t>(relocation_section.End()),
- image_size);
- return false;
- }
-
- off_t expected_diff = image_header.GetPatchDelta();
- if (expected_diff == 0) {
- *error_msg = StringPrintf("Unsuported patch delta of zero in %s",
- relocated_filename.c_str());
- return false;
- }
-
- // Relocated image is expected to differ from the original due to relocation.
- // Unrelocate the image in memory to compensate.
- MemoryRegion relocations(image.get() + relocation_section.Offset(), relocation_section.Size());
- BitMemoryRegion relocation_bitmask(relocations,
- /* bit_offset */ 0u,
- number_of_relocation_locations);
- for (size_t index = 0; index != number_of_relocation_locations; ++index) {
- if (relocation_bitmask.LoadBit(index)) {
- uint32_t* image_value = reinterpret_cast<uint32_t*>(image.get() + index * 4u);
- *image_value -= expected_diff;
- }
- }
-
- // Image in memory is now supposed to be identical to the original. We
- // confirm this by comparing the digest of the in-memory image to the expected
- // digest from relocation file.
- uint8_t image_digest[SHA256_DIGEST_LENGTH];
- SHA256(image.get(), image_size, image_digest);
- if (memcmp(image_digest, rel.get(), SHA256_DIGEST_LENGTH) != 0) {
- *error_msg =
- StringPrintf(
- "Relocated image %s does not match the original %s after unrelocation",
- relocated_filename.c_str(),
- original_filename.c_str());
- return false;
- }
-
- // Relocated image is identical to the original, once relocations are taken into account
- return true;
-}
-
-static bool VerifySymlink(const std::string& intended_target, const std::string& link_name) {
- std::string actual_target;
- if (!android::base::Readlink(link_name, &actual_target)) {
- PLOG(ERROR) << "Readlink on " << link_name << " failed.";
- return false;
- }
- return actual_target == intended_target;
-}
-
-static bool VerifyVdexAndOatSymlinks(const std::string& input_image_filename,
- const std::string& output_image_filename) {
- return VerifySymlink(ImageHeader::GetVdexLocationFromImageLocation(input_image_filename),
- ImageHeader::GetVdexLocationFromImageLocation(output_image_filename))
- && VerifySymlink(ImageHeader::GetOatLocationFromImageLocation(input_image_filename),
- ImageHeader::GetOatLocationFromImageLocation(output_image_filename));
-}
-
-bool PatchOat::CreateVdexAndOatSymlinks(const std::string& input_image_filename,
- const std::string& output_image_filename) {
- std::string input_vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(input_image_filename);
- std::string input_oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(input_image_filename);
-
- std::unique_ptr<File> input_oat_file(OS::OpenFileForReading(input_oat_filename.c_str()));
- if (input_oat_file.get() == nullptr) {
- LOG(ERROR) << "Unable to open input oat file at " << input_oat_filename;
- return false;
- }
- std::string error_msg;
- std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat_file.get(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- &error_msg));
- if (elf.get() == nullptr) {
- LOG(ERROR) << "Unable to open oat file " << input_oat_filename << " : " << error_msg;
- return false;
- }
-
- MaybePic is_oat_pic = IsOatPic(elf.get());
- if (is_oat_pic >= ERROR_FIRST) {
- // Error logged by IsOatPic
- return false;
- } else if (is_oat_pic == NOT_PIC) {
- LOG(ERROR) << "patchoat cannot be used on non-PIC oat file: " << input_oat_filename;
- return false;
- }
-
- CHECK(is_oat_pic == PIC);
-
- std::string output_vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
- std::string output_oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
-
- return SymlinkFile(input_oat_filename, output_oat_filename) &&
- SymlinkFile(input_vdex_filename, output_vdex_filename);
-}
-
-bool PatchOat::Patch(const std::string& image_location,
- off_t delta,
- const std::string& output_image_directory,
- const std::string& output_image_relocation_directory,
- InstructionSet isa,
- TimingLogger* timings) {
- bool output_image = !output_image_directory.empty();
- bool output_image_relocation = !output_image_relocation_directory.empty();
- if ((!output_image) && (!output_image_relocation)) {
- // Nothing to do
- return true;
- }
- if ((output_image_relocation) && (delta == 0)) {
- LOG(ERROR) << "Cannot output image relocation information when requested relocation delta is 0";
- return false;
- }
-
- CHECK(Runtime::Current() == nullptr);
- CHECK(!image_location.empty()) << "image file must have a filename.";
-
- TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
- CHECK_NE(isa, InstructionSet::kNone);
-
- // Set up the runtime
- PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
- if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
- LOG(ERROR) << "Unable to initialize runtime";
- return false;
- }
- std::unique_ptr<Runtime> runtime(Runtime::Current());
-
- // Runtime::Create acquired the mutator_lock_ that is normally given away when we call
- // Runtime::Start(); give it away now and then switch to a more manageable ScopedObjectAccess.
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
- ScopedObjectAccess soa(Thread::Current());
-
- std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- std::map<gc::space::ImageSpace*, MemMap> space_to_memmap_map;
-
- for (size_t i = 0; i < spaces.size(); ++i) {
- t.NewTiming("Image Patching setup");
- gc::space::ImageSpace* space = spaces[i];
- std::string input_image_filename = space->GetImageFilename();
- std::unique_ptr<File> input_image(OS::OpenFileForReading(input_image_filename.c_str()));
- if (input_image.get() == nullptr) {
- LOG(ERROR) << "Unable to open input image file at " << input_image_filename;
- return false;
- }
-
- int64_t image_len = input_image->GetLength();
- if (image_len < 0) {
- LOG(ERROR) << "Error while getting image length";
- return false;
- }
- ImageHeader image_header;
- if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
- sizeof(image_header), 0)) {
- LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
- }
-
- /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
- // Nothing special to do right now since the image always needs to get patched.
- // Perhaps in some far-off future we may have images with relative addresses that are true-PIC.
-
- // Create the map where we will write the image patches to.
- std::string error_msg;
- MemMap image = MemMap::MapFile(image_len,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg);
- if (!image.IsValid()) {
- LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
- return false;
- }
-
-
- space_to_memmap_map.emplace(space, std::move(image));
- PatchOat p = PatchOat(isa,
- &space_to_memmap_map[space],
- space->GetLiveBitmap(),
- space->GetMemMap(),
- delta,
- &space_to_memmap_map,
- timings);
-
- t.NewTiming("Patching image");
- if (!p.PatchImage(i == 0)) {
- LOG(ERROR) << "Failed to patch image file " << input_image_filename;
- return false;
- }
-
- // Write the patched image spaces.
- if (output_image) {
- std::string output_image_filename;
- if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
- output_image_directory.c_str(),
- &output_image_filename,
- &error_msg)) {
- LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
- return false;
- }
-
- if (!CreateVdexAndOatSymlinks(input_image_filename, output_image_filename))
- return false;
-
- t.NewTiming("Writing image");
- std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
- if (output_image_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
- return false;
- }
-
- bool success = p.WriteImage(output_image_file.get());
- success = FinishFile(output_image_file.get(), success);
- if (!success) {
- return false;
- }
- }
-
- if (output_image_relocation) {
- t.NewTiming("Writing image relocation");
- std::string original_image_filename(space->GetImageLocation() + ".rel");
- std::string image_relocation_filename =
- output_image_relocation_directory
- + (android::base::StartsWith(original_image_filename, "/") ? "" : "/")
- + original_image_filename.substr(original_image_filename.find_last_of("/"));
- int64_t input_image_size = input_image->GetLength();
- if (input_image_size < 0) {
- LOG(ERROR) << "Error while getting input image size";
- return false;
- }
- MemMap original = MemMap::MapFile(input_image_size,
- PROT_READ,
- MAP_PRIVATE,
- input_image->Fd(),
- 0,
- /*low_4gb*/false,
- input_image->GetPath().c_str(),
- &error_msg);
- if (!original.IsValid()) {
- LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
- return false;
- }
-
- const MemMap* relocated = p.image_;
-
- if (!WriteRelFile(original, *relocated, image_relocation_filename, &error_msg)) {
- LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
- << ": " << error_msg;
- return false;
- }
- }
- }
-
- if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
- // On non-debug builds we want to just exit rather than bring the runtime down in an
- // orderly fashion, so release (leak) the runtime instead of destroying it.
- runtime.release();
- }
-
- return true;
-}
-
-bool PatchOat::Verify(const std::string& image_location,
- const std::string& output_image_directory,
- InstructionSet isa,
- TimingLogger* timings) {
- if (image_location.empty()) {
- LOG(ERROR) << "Original image file not provided";
- return false;
- }
- if (output_image_directory.empty()) {
- LOG(ERROR) << "Relocated image directory not provided";
- return false;
- }
-
- TimingLogger::ScopedTiming t("Runtime Setup", timings);
-
- CHECK_NE(isa, InstructionSet::kNone);
-
- // Set up the runtime
- PatchoatRuntimeOptionsHolder options_holder(image_location, isa);
- if (!Runtime::Create(options_holder.GetRuntimeOptions(), false)) {
- LOG(ERROR) << "Unable to initialize runtime";
- return false;
- }
- std::unique_ptr<Runtime> runtime(Runtime::Current());
-
- // Runtime::Create acquired the mutator_lock_ that is normally given away when we call
- // Runtime::Start(); give it away now and then switch to a more manageable ScopedObjectAccess.
- Thread::Current()->TransitionFromRunnableToSuspended(kNative);
- ScopedObjectAccess soa(Thread::Current());
-
- t.NewTiming("Image Verification setup");
- std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
-
- // TODO: Check that no other .rel files exist in the original dir
-
- bool success = true;
- std::string image_location_dir = android::base::Dirname(image_location);
- for (size_t i = 0; i < spaces.size(); ++i) {
- gc::space::ImageSpace* space = spaces[i];
-
- std::string relocated_image_filename;
- std::string error_msg;
- if (!GetDalvikCacheFilename(space->GetImageLocation().c_str(),
- output_image_directory.c_str(), &relocated_image_filename, &error_msg)) {
- LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
- success = false;
- break;
- }
- // location: /system/framework/boot.art
- // isa: arm64
- // basename: boot.art
- // original: /system/framework/arm64/boot.art
- // relocation: /system/framework/arm64/boot.art.rel
- std::string original_image_filename =
- GetSystemImageFilename(space->GetImageLocation().c_str(), isa);
-
- if (!CheckImageIdenticalToOriginalExceptForRelocation(
- relocated_image_filename, original_image_filename, &error_msg)) {
- LOG(ERROR) << error_msg;
- success = false;
- break;
- }
-
- if (!VerifyVdexAndOatSymlinks(original_image_filename, relocated_image_filename)) {
- LOG(ERROR) << "Verification of vdex and oat symlinks for "
- << space->GetImageLocation() << " failed.";
- success = false;
- break;
- }
- }
-
- if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
- // On non-debug builds we want to just exit rather than bring the runtime down in an
- // orderly fashion, so release (leak) the runtime instead of destroying it.
- runtime.release();
- }
-
- return success;
-}
-
-bool PatchOat::WriteImage(File* out) {
- CHECK(out != nullptr);
- TimingLogger::ScopedTiming t("Writing image File", timings_);
- std::string error_msg;
-
- // No error checking here, this is best effort. The locking may or may not
- // succeed and we don't really care either way.
- ScopedFlock img_flock = LockedFile::DupOf(out->Fd(), out->GetPath(),
- true /* read_only_mode */, &error_msg);
-
- CHECK(image_ != nullptr);
- size_t expect = image_->Size();
- if (out->WriteFully(reinterpret_cast<char*>(image_->Begin()), expect) &&
- out->SetLength(expect) == 0) {
- return true;
- } else {
- LOG(ERROR) << "Writing to image file " << out->GetPath() << " failed.";
- return false;
- }
-}
-
-bool PatchOat::IsImagePic(const ImageHeader& image_header, const std::string& image_path) {
- if (!image_header.CompilePic()) {
- if (kIsDebugBuild) {
- LOG(INFO) << "image at location " << image_path << " was *not* compiled pic";
- }
- return false;
- }
-
- if (kIsDebugBuild) {
- LOG(INFO) << "image at location " << image_path << " was compiled PIC";
- }
-
- return true;
-}
-
-PatchOat::MaybePic PatchOat::IsOatPic(const ElfFile* oat_in) {
- if (oat_in == nullptr) {
- LOG(ERROR) << "No ELF input oat fie available";
- return ERROR_OAT_FILE;
- }
-
- const std::string& file_path = oat_in->GetFilePath();
-
- const OatHeader* oat_header = GetOatHeader(oat_in);
- if (oat_header == nullptr) {
- LOG(ERROR) << "Failed to find oat header in oat file " << file_path;
- return ERROR_OAT_FILE;
- }
-
- if (!oat_header->IsValid()) {
- LOG(ERROR) << "Elf file " << file_path << " has an invalid oat header";
- return ERROR_OAT_FILE;
- }
-
- bool is_pic = oat_header->IsPic();
- if (kIsDebugBuild) {
- LOG(INFO) << "Oat file at " << file_path << " is " << (is_pic ? "PIC" : "not pic");
- }
-
- return is_pic ? PIC : NOT_PIC;
-}
-
-class PatchOat::PatchOatArtFieldVisitor : public ArtFieldVisitor {
- public:
- explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
- void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
- dest->SetDeclaringClass(
- patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass().Ptr()));
- }
-
- private:
- PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtFields(const ImageHeader* image_header) {
- PatchOatArtFieldVisitor visitor(this);
- image_header->VisitPackedArtFields(&visitor, heap_->Begin());
-}
-
-class PatchOat::PatchOatArtMethodVisitor : public ArtMethodVisitor {
- public:
- explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
- void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
- patch_oat_->FixupMethod(method, dest);
- }
-
- private:
- PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchArtMethods(const ImageHeader* image_header) {
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- PatchOatArtMethodVisitor visitor(this);
- image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size);
-}
-
-void PatchOat::PatchImTables(const ImageHeader* image_header) {
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- // We can safely walk the target image since the im tables are independent.
- image_header->VisitPackedImTables(
- [this](ArtMethod* method) {
- return RelocatedAddressOfPointer(method);
- },
- image_->Begin(),
- pointer_size);
-}
-
-void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) {
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- // We can safely walk target image since the conflict tables are independent.
- image_header->VisitPackedImtConflictTables(
- [this](ArtMethod* method) {
- return RelocatedAddressOfPointer(method);
- },
- image_->Begin(),
- pointer_size);
-}
-
-class PatchOat::FixupRootVisitor : public RootVisitor {
- public:
- explicit FixupRootVisitor(const PatchOat* patch_oat) : patch_oat_(patch_oat) {
- }
-
- void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(Locks::mutator_lock_) {
- for (size_t i = 0; i < count; ++i) {
- *roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
- }
- }
-
- void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(Locks::mutator_lock_) {
- for (size_t i = 0; i < count; ++i) {
- roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
- }
- }
-
- private:
- const PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchInternedStrings(const ImageHeader* image_header) {
- const auto& section = image_header->GetInternedStringsSection();
- if (section.Size() == 0) {
- return;
- }
- InternTable temp_table;
- // Note that we require that AddTableFromMemory does not make an internal copy of the elements.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- temp_table.AddTableFromMemory(image_->Begin() + section.Offset());
- FixupRootVisitor visitor(this);
- temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots);
-}
-
-void PatchOat::PatchClassTable(const ImageHeader* image_header) {
- const auto& section = image_header->GetClassTableSection();
- if (section.Size() == 0) {
- return;
- }
- // Note that we require that ReadFromMemory does not make an internal copy of the elements.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- ClassTable temp_table;
- temp_table.ReadFromMemory(image_->Begin() + section.Offset());
- FixupRootVisitor visitor(this);
- temp_table.VisitRoots(UnbufferedRootVisitor(&visitor, RootInfo(kRootUnknown)));
-}
-
-
-class PatchOat::RelocatedPointerVisitor {
- public:
- explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
-
- template <typename T>
- T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
- return patch_oat_->RelocatedAddressOfPointer(ptr);
- }
-
- private:
- PatchOat* const patch_oat_;
-};
-
-void PatchOat::PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots) {
- auto* dex_caches = down_cast<mirror::ObjectArray<mirror::DexCache>*>(
- img_roots->Get(ImageHeader::kDexCaches));
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- for (size_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
- auto* orig_dex_cache = dex_caches->GetWithoutChecks(i);
- auto* copy_dex_cache = RelocatedCopyOf(orig_dex_cache);
- // Though the DexCache array fields are usually treated as native pointers, we set the full
- // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
- // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
- // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset)).
- mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
- mirror::StringDexCacheType* relocated_strings = RelocatedAddressOfPointer(orig_strings);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::StringsOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_strings)));
- if (orig_strings != nullptr) {
- orig_dex_cache->FixupStrings(RelocatedCopyOf(orig_strings), RelocatedPointerVisitor(this));
- }
- mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
- mirror::TypeDexCacheType* relocated_types = RelocatedAddressOfPointer(orig_types);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::ResolvedTypesOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_types)));
- if (orig_types != nullptr) {
- orig_dex_cache->FixupResolvedTypes(RelocatedCopyOf(orig_types),
- RelocatedPointerVisitor(this));
- }
- mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
- mirror::MethodDexCacheType* relocated_methods = RelocatedAddressOfPointer(orig_methods);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::ResolvedMethodsOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_methods)));
- if (orig_methods != nullptr) {
- mirror::MethodDexCacheType* copy_methods = RelocatedCopyOf(orig_methods);
- for (size_t j = 0, num = orig_dex_cache->NumResolvedMethods(); j != num; ++j) {
- mirror::MethodDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(orig_methods, j, pointer_size);
- mirror::MethodDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
- mirror::DexCache::SetNativePairPtrSize(copy_methods, j, copy, pointer_size);
- }
- }
- mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
- mirror::FieldDexCacheType* relocated_fields = RelocatedAddressOfPointer(orig_fields);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::ResolvedFieldsOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_fields)));
- if (orig_fields != nullptr) {
- mirror::FieldDexCacheType* copy_fields = RelocatedCopyOf(orig_fields);
- for (size_t j = 0, num = orig_dex_cache->NumResolvedFields(); j != num; ++j) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(orig_fields, j, pointer_size);
- mirror::FieldDexCachePair copy(RelocatedAddressOfPointer(orig.object), orig.index);
- mirror::DexCache::SetNativePairPtrSize(copy_fields, j, copy, pointer_size);
- }
- }
- mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
- mirror::MethodTypeDexCacheType* relocated_method_types =
- RelocatedAddressOfPointer(orig_method_types);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::ResolvedMethodTypesOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_method_types)));
- if (orig_method_types != nullptr) {
- orig_dex_cache->FixupResolvedMethodTypes(RelocatedCopyOf(orig_method_types),
- RelocatedPointerVisitor(this));
- }
-
- GcRoot<mirror::CallSite>* orig_call_sites = orig_dex_cache->GetResolvedCallSites();
- GcRoot<mirror::CallSite>* relocated_call_sites = RelocatedAddressOfPointer(orig_call_sites);
- copy_dex_cache->SetField64<false>(
- mirror::DexCache::ResolvedCallSitesOffset(),
- static_cast<int64_t>(reinterpret_cast<uintptr_t>(relocated_call_sites)));
- if (orig_call_sites != nullptr) {
- orig_dex_cache->FixupResolvedCallSites(RelocatedCopyOf(orig_call_sites),
- RelocatedPointerVisitor(this));
- }
- }
-}
-
-bool PatchOat::PatchImage(bool primary_image) {
- ImageHeader* image_header = reinterpret_cast<ImageHeader*>(image_->Begin());
- CHECK_GT(image_->Size(), sizeof(ImageHeader));
- // These are the roots from the original file.
- mirror::ObjectArray<mirror::Object>* img_roots = image_header->GetImageRoots().Ptr();
- image_header->RelocateImage(delta_);
-
- PatchArtFields(image_header);
- PatchArtMethods(image_header);
- PatchImTables(image_header);
- PatchImtConflictTables(image_header);
- PatchInternedStrings(image_header);
- PatchClassTable(image_header);
- // Patch dex file int/long arrays which point to ArtFields.
- PatchDexFileArrays(img_roots);
-
- if (primary_image) {
- VisitObject(img_roots);
- }
-
- if (!image_header->IsValid()) {
- LOG(ERROR) << "relocation renders image header invalid";
- return false;
- }
-
- {
- TimingLogger::ScopedTiming t("Walk Bitmap", timings_);
- // Walk the bitmap.
- WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
- auto visitor = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- VisitObject(obj);
- };
- bitmap_->Walk(visitor);
- }
- return true;
-}
-
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Object> obj,
- MemberOffset off,
- bool is_static_unused ATTRIBUTE_UNUSED) const {
- mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
- mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
- copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-void PatchOat::PatchVisitor::operator() (ObjPtr<mirror::Class> cls ATTRIBUTE_UNUSED,
- ObjPtr<mirror::Reference> ref) const {
- MemberOffset off = mirror::Reference::ReferentOffset();
- mirror::Object* referent = ref->GetReferent();
- DCHECK(referent == nullptr ||
- Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(referent)) << referent;
- mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent);
- copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
-}
-
-// Called by PatchImage.
-void PatchOat::VisitObject(mirror::Object* object) {
- mirror::Object* copy = RelocatedCopyOf(object);
- CHECK(copy != nullptr);
- if (kUseBakerReadBarrier) {
- object->AssertReadBarrierState();
- }
- PatchOat::PatchVisitor visitor(this, copy);
- object->VisitReferences<kVerifyNone>(visitor, visitor);
- if (object->IsClass<kVerifyNone>()) {
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- mirror::Class* klass = object->AsClass();
- mirror::Class* copy_klass = down_cast<mirror::Class*>(copy);
- RelocatedPointerVisitor native_visitor(this);
- klass->FixupNativePointers(copy_klass, pointer_size, native_visitor);
- auto* vtable = klass->GetVTable();
- if (vtable != nullptr) {
- vtable->Fixup(RelocatedCopyOfFollowImages(vtable), pointer_size, native_visitor);
- }
- mirror::IfTable* iftable = klass->GetIfTable();
- for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
- if (iftable->GetMethodArrayCount(i) > 0) {
- auto* method_array = iftable->GetMethodArray(i);
- CHECK(method_array != nullptr);
- method_array->Fixup(RelocatedCopyOfFollowImages(method_array),
- pointer_size,
- native_visitor);
- }
- }
- } else if (object->GetClass() == GetClassRoot<mirror::Method>() ||
- object->GetClass() == GetClassRoot<mirror::Constructor>()) {
- // Need to go update the ArtMethod.
- auto* dest = down_cast<mirror::Executable*>(copy);
- auto* src = down_cast<mirror::Executable*>(object);
- dest->SetArtMethod(RelocatedAddressOfPointer(src->GetArtMethod()));
- }
-}
-
-void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
- const PointerSize pointer_size = InstructionSetPointerSize(isa_);
- copy->CopyFrom(object, pointer_size);
- // Just update the entry points if it looks like we should.
- // TODO: sanity check all the pointers' values
- copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass().Ptr()));
- copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
- object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
- // No special handling for IMT conflict table since all pointers are moved by the same offset.
- copy->SetDataPtrSize(RelocatedAddressOfPointer(
- object->GetDataPtrSize(pointer_size)), pointer_size);
-}
-
-static int orig_argc;
-static char** orig_argv;
-
-static std::string CommandLine() {
- std::vector<std::string> command;
- for (int i = 0; i < orig_argc; ++i) {
- command.push_back(orig_argv[i]);
- }
- return android::base::Join(command, ' ');
-}
-
-static void UsageErrorV(const char* fmt, va_list ap) {
- std::string error;
- android::base::StringAppendV(&error, fmt, ap);
- LOG(ERROR) << error;
-}
-
-static void UsageError(const char* fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- UsageErrorV(fmt, ap);
- va_end(ap);
-}
-
-NO_RETURN static void Usage(const char *fmt, ...) {
- va_list ap;
- va_start(ap, fmt);
- UsageErrorV(fmt, ap);
- va_end(ap);
-
- UsageError("Command: %s", CommandLine().c_str());
- UsageError("Usage: patchoat [options]...");
- UsageError("");
- UsageError(" --instruction-set=<isa>: Specifies the instruction set the patched code is");
- UsageError(" compiled for (required).");
- UsageError("");
- UsageError(" --input-image-location=<file.art>: Specifies the 'location' of the image file to");
- UsageError(" be patched.");
- UsageError("");
- UsageError(" --output-image-directory=<dir>: Specifies the directory to write the patched");
- UsageError(" image file(s) to.");
- UsageError("");
- UsageError(" --output-image-relocation-directory=<dir>: Specifies the directory to write");
- UsageError(" the image relocation information to.");
- UsageError("");
- UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
- UsageError(" This value may be negative.");
- UsageError("");
- UsageError(" --verify: Verify an existing patched file instead of creating one.");
- UsageError("");
- UsageError(" --dump-timings: dump out patch timing information");
- UsageError("");
- UsageError(" --no-dump-timings: do not dump out patch timing information");
- UsageError("");
-
- exit(EXIT_FAILURE);
-}
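A purely hypothetical invocation built from the options listed above (the paths and the delta are illustrative only; the delta must stay page-aligned):

  patchoat --instruction-set=arm64 \
           --input-image-location=/system/framework/boot.art \
           --output-image-directory=/data/dalvik-cache/arm64 \
           --output-image-relocation-directory=/data/dalvik-cache/arm64 \
           --base-offset-delta=0x4000 \
           --dump-timings

Passing --verify with the same --input-image-location and --output-image-directory checks an existing relocated image against the original instead of creating one.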
-
-static int patchoat_patch_image(TimingLogger& timings,
- InstructionSet isa,
- const std::string& input_image_location,
- const std::string& output_image_directory,
- const std::string& output_image_relocation_directory,
- off_t base_delta,
- bool base_delta_set,
- bool debug) {
- CHECK(!input_image_location.empty());
- if ((output_image_directory.empty()) && (output_image_relocation_directory.empty())) {
- Usage("Image patching requires --output-image-directory or --output-image-relocation-directory");
- }
-
- if (!base_delta_set) {
- Usage("Must supply a desired new offset or delta.");
- }
-
- if (!IsAligned<kPageSize>(base_delta)) {
- Usage("Base offset/delta must be aligned to a pagesize (0x%08x) boundary.", kPageSize);
- }
-
- if (debug) {
- LOG(INFO) << "moving offset by " << base_delta
- << " (0x" << std::hex << base_delta << ") bytes or "
- << std::dec << (base_delta/kPageSize) << " pages.";
- }
-
- TimingLogger::ScopedTiming pt("patch image and oat", &timings);
-
- bool ret =
- PatchOat::Patch(
- input_image_location,
- base_delta,
- output_image_directory,
- output_image_relocation_directory,
- isa,
- &timings);
-
- if (kIsDebugBuild) {
- LOG(INFO) << "Exiting with return ... " << ret;
- }
- return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat_verify_image(TimingLogger& timings,
- InstructionSet isa,
- const std::string& input_image_location,
- const std::string& output_image_directory) {
- CHECK(!input_image_location.empty());
- TimingLogger::ScopedTiming pt("verify image and oat", &timings);
-
- bool ret =
- PatchOat::Verify(
- input_image_location,
- output_image_directory,
- isa,
- &timings);
-
- if (kIsDebugBuild) {
- LOG(INFO) << "Exiting with return ... " << ret;
- }
- return ret ? EXIT_SUCCESS : EXIT_FAILURE;
-}
-
-static int patchoat(int argc, char **argv) {
- Locks::Init();
- InitLogging(argv, Runtime::Abort);
- MemMap::Init();
- const bool debug = kIsDebugBuild;
- orig_argc = argc;
- orig_argv = argv;
- TimingLogger timings("patcher", false, false);
-
- // Skip over the command name.
- argv++;
- argc--;
-
- if (argc == 0) {
- Usage("No arguments specified");
- }
-
- timings.StartTiming("Patchoat");
-
- // cmd line args
- bool isa_set = false;
- InstructionSet isa = InstructionSet::kNone;
- std::string input_image_location;
- std::string output_image_directory;
- std::string output_image_relocation_directory;
- off_t base_delta = 0;
- bool base_delta_set = false;
- bool dump_timings = kIsDebugBuild;
- bool verify = false;
-
- for (int i = 0; i < argc; ++i) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
- }
- if (option.starts_with("--instruction-set=")) {
- isa_set = true;
- const char* isa_str = option.substr(strlen("--instruction-set=")).data();
- isa = GetInstructionSetFromString(isa_str);
- if (isa == InstructionSet::kNone) {
- Usage("Unknown or invalid instruction set %s", isa_str);
- }
- } else if (option.starts_with("--input-image-location=")) {
- input_image_location = option.substr(strlen("--input-image-location=")).data();
- } else if (option.starts_with("--output-image-directory=")) {
- output_image_directory = option.substr(strlen("--output-image-directory=")).data();
- } else if (option.starts_with("--output-image-relocation-directory=")) {
- output_image_relocation_directory =
- option.substr(strlen("--output-image-relocation-directory=")).data();
- } else if (option.starts_with("--base-offset-delta=")) {
- const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
- base_delta_set = true;
- if (!ParseInt(base_delta_str, &base_delta)) {
- Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
- }
- } else if (option == "--dump-timings") {
- dump_timings = true;
- } else if (option == "--no-dump-timings") {
- dump_timings = false;
- } else if (option == "--verify") {
- verify = true;
- } else {
- Usage("Unknown argument %s", option.data());
- }
- }
-
- // The instruction set is mandatory. This simplifies things...
- if (!isa_set) {
- Usage("Instruction set must be set.");
- }
-
- int ret;
- if (verify) {
- ret = patchoat_verify_image(timings,
- isa,
- input_image_location,
- output_image_directory);
- } else {
- ret = patchoat_patch_image(timings,
- isa,
- input_image_location,
- output_image_directory,
- output_image_relocation_directory,
- base_delta,
- base_delta_set,
- debug);
- }
-
- timings.EndTiming();
- if (dump_timings) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
- }
-
- return ret;
-}
-
-} // namespace art
-
-int main(int argc, char **argv) {
- return art::patchoat(argc, argv);
-}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
deleted file mode 100644
index ac2fdf5..0000000
--- a/patchoat/patchoat.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_PATCHOAT_PATCHOAT_H_
-#define ART_PATCHOAT_PATCHOAT_H_
-
-#include "arch/instruction_set.h"
-#include "base/enums.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "base/os.h"
-#include "elf_file.h"
-#include "elf_utils.h"
-#include "gc/accounting/space_bitmap.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "runtime.h"
-
-namespace art {
-
-class ArtMethod;
-class ImageHeader;
-class OatHeader;
-
-namespace mirror {
-class Object;
-class PointerArray;
-class Reference;
-class Class;
-} // namespace mirror
-
-class PatchOat {
- public:
- // Relocates the provided image by the specified offset. If output_image_directory is non-empty,
- // outputs the relocated image into that directory. If output_image_relocation_directory is
- // non-empty, outputs image relocation files (see GeneratePatch) into that directory.
- static bool Patch(const std::string& image_location,
- off_t delta,
- const std::string& output_image_directory,
- const std::string& output_image_relocation_directory,
- InstructionSet isa,
- TimingLogger* timings);
- static bool Verify(const std::string& image_location,
- const std::string& output_image_directory,
- InstructionSet isa,
- TimingLogger* timings);
-
- // Generates a patch which can be used to efficiently relocate the original file or to check that
- // a relocated file matches the original. The patch is generated from the difference of the
- // |original| and the already |relocated| image, and written to |output| in the form of unsigned
- // LEB128 for each relocation position.
- static bool GeneratePatch(const MemMap& original,
- const MemMap& relocated,
- std::vector<uint8_t>* output,
- std::string* error_msg);
-
- ~PatchOat() {}
- PatchOat(PatchOat&&) = default;
-
- private:
- // All pointers are only borrowed.
- PatchOat(InstructionSet isa, MemMap* image,
- gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
- std::map<gc::space::ImageSpace*, MemMap>* map, TimingLogger* timings)
- : image_(image), bitmap_(bitmap), heap_(heap),
- delta_(delta), isa_(isa), space_map_(map), timings_(timings) {}
-
- // Was the .art image at image_path made with --compile-pic ?
- static bool IsImagePic(const ImageHeader& image_header, const std::string& image_path);
-
- enum MaybePic {
- NOT_PIC, // Code not pic. Patch as usual.
- PIC, // Code was pic. Create symlink; skip OAT patching.
- ERROR_OAT_FILE, // Failed to open or parse the oat file.
- ERROR_FIRST = ERROR_OAT_FILE,
- };
-
- // Was the .oat image at oat_in made with --compile-pic ?
- static MaybePic IsOatPic(const ElfFile* oat_in);
-
- static bool CreateVdexAndOatSymlinks(const std::string& input_image_filename,
- const std::string& output_image_filename);
-
-
- void VisitObject(mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupMethod(ArtMethod* object, ArtMethod* copy)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool PatchImage(bool primary_image) REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchArtFields(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchArtMethods(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchImTables(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchImtConflictTables(const ImageHeader* image_header)
- REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchInternedStrings(const ImageHeader* image_header)
- REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchClassTable(const ImageHeader* image_header)
- REQUIRES_SHARED(Locks::mutator_lock_);
- void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- bool WriteImage(File* out);
-
- template <typename T>
- T* RelocatedCopyOf(T* obj) const {
- if (obj == nullptr) {
- return nullptr;
- }
- DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin()));
- DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End()));
- uintptr_t heap_off =
- reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin());
- DCHECK_LT(heap_off, image_->Size());
- return reinterpret_cast<T*>(image_->Begin() + heap_off);
- }
-
- template <typename T>
- T* RelocatedCopyOfFollowImages(T* obj) const {
- if (obj == nullptr) {
- return nullptr;
- }
- // Find ImageSpace this belongs to.
- auto image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- for (gc::space::ImageSpace* image_space : image_spaces) {
- if (image_space->Contains(obj)) {
- uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) -
- reinterpret_cast<uintptr_t>(image_space->GetMemMap()->Begin());
- return reinterpret_cast<T*>(space_map_->find(image_space)->second.Begin() + heap_off);
- }
- }
- LOG(FATAL) << "Did not find object in boot image space " << obj;
- UNREACHABLE();
- }
-
- template <typename T>
- T* RelocatedAddressOfPointer(T* obj) const {
- if (obj == nullptr) {
- return obj;
- }
- auto ret = reinterpret_cast<uintptr_t>(obj) + delta_;
- // Trim off high bits in case negative relocation with 64 bit patchoat.
- if (Is32BitISA()) {
- ret = static_cast<uintptr_t>(static_cast<uint32_t>(ret));
- }
- return reinterpret_cast<T*>(ret);
- }
-
- bool Is32BitISA() const {
- return InstructionSetPointerSize(isa_) == PointerSize::k32;
- }
-
- // Walks through the old image and patches the mmap'd copy of it to the new offset. It does not
- // change the heap.
- class PatchVisitor {
- public:
- PatchVisitor(PatchOat* patcher, mirror::Object* copy) : patcher_(patcher), copy_(copy) {}
- ~PatchVisitor() {}
- void operator() (ObjPtr<mirror::Object> obj, MemberOffset off, bool b) const
- REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- // For reference classes.
- void operator() (ObjPtr<mirror::Class> cls, ObjPtr<mirror::Reference> ref) const
- REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- // TODO: Consider using these for updating native class roots?
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
- const {}
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
-
- private:
- PatchOat* const patcher_;
- mirror::Object* const copy_;
- };
-
- // A mmap of the image we are patching. This is modified.
- const MemMap* const image_;
- // The bitmap over the image within the heap we are patching. This is not modified.
- gc::accounting::ContinuousSpaceBitmap* const bitmap_;
- // The heap we are patching. This is not modified.
- const MemMap* const heap_;
- // The amount we are changing the offset by.
- const off_t delta_;
- // Active instruction set, used to know the entrypoint size.
- const InstructionSet isa_;
-
- const std::map<gc::space::ImageSpace*, MemMap>* space_map_;
-
- TimingLogger* timings_;
-
- class FixupRootVisitor;
- class RelocatedPointerVisitor;
- class PatchOatArtFieldVisitor;
- class PatchOatArtMethodVisitor;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
-};
-
-} // namespace art
-#endif // ART_PATCHOAT_PATCHOAT_H_
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
deleted file mode 100644
index 08bf31c..0000000
--- a/patchoat/patchoat_test.cc
+++ /dev/null
@@ -1,617 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <openssl/sha.h>
-#include <dirent.h>
-#include <sys/types.h>
-
-#include <string>
-#include <vector>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/hex_dump.h"
-#include "base/leb128.h"
-#include "dexopt_test.h"
-#include "runtime.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-using android::base::StringPrintf;
-
-class PatchoatTest : public DexoptTest {
- public:
- static bool ListDirFilesEndingWith(
- const std::string& dir,
- const std::string& suffix,
- std::vector<std::string>* filenames,
- std::string* error_msg) {
- DIR* d = opendir(dir.c_str());
- if (d == nullptr) {
- *error_msg = "Failed to open directory";
- return false;
- }
- dirent* e;
- struct stat s;
- size_t suffix_len = suffix.size();
- while ((e = readdir(d)) != nullptr) {
- if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
- continue;
- }
- size_t name_len = strlen(e->d_name);
- if ((name_len < suffix_len) || (strcmp(&e->d_name[name_len - suffix_len], suffix.c_str()))) {
- continue;
- }
- std::string basename(e->d_name);
- std::string filename = dir + "/" + basename;
- int stat_result = lstat(filename.c_str(), &s);
- if (stat_result != 0) {
- *error_msg =
- StringPrintf("Failed to stat %s: stat returned %d", filename.c_str(), stat_result);
- return false;
- }
- if (S_ISDIR(s.st_mode)) {
- continue;
- }
- filenames->push_back(basename);
- }
- closedir(d);
- return true;
- }
-
- static void AddRuntimeArg(std::vector<std::string>& args, const std::string& arg) {
- args.push_back("--runtime-arg");
- args.push_back(arg);
- }
-
- bool CompileBootImage(const std::vector<std::string>& extra_args,
- const std::string& image_file_name_prefix,
- uint32_t base_addr,
- std::string* error_msg) {
- Runtime* const runtime = Runtime::Current();
- std::vector<std::string> argv;
- argv.push_back(runtime->GetCompilerExecutable());
- AddRuntimeArg(argv, "-Xms64m");
- AddRuntimeArg(argv, "-Xmx64m");
- std::vector<std::string> dex_files = GetLibCoreDexFileNames();
- for (const std::string& dex_file : dex_files) {
- argv.push_back("--dex-file=" + dex_file);
- argv.push_back("--dex-location=" + dex_file);
- }
- if (runtime->IsJavaDebuggable()) {
- argv.push_back("--debuggable");
- }
- runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
- AddRuntimeArg(argv, "-Xverify:softfail");
-
- if (!kIsTargetBuild) {
- argv.push_back("--host");
- }
-
- argv.push_back("--image=" + image_file_name_prefix + ".art");
- argv.push_back("--oat-file=" + image_file_name_prefix + ".oat");
- argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
- argv.push_back(StringPrintf("--base=0x%" PRIx32, base_addr));
- argv.push_back("--compile-pic");
- argv.push_back("--multi-image");
- argv.push_back("--no-generate-debug-info");
-
- std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
- argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
- // We must set --android-root.
- const char* android_root = getenv("ANDROID_ROOT");
- CHECK(android_root != nullptr);
- argv.push_back("--android-root=" + std::string(android_root));
- argv.insert(argv.end(), extra_args.begin(), extra_args.end());
-
- return RunDex2OatOrPatchoat(argv, error_msg);
- }
-
- static std::vector<std::string> BasePatchoatCommand(const std::string& input_image_location,
- off_t base_offset_delta) {
- Runtime* const runtime = Runtime::Current();
- std::vector<std::string> argv;
- argv.push_back(runtime->GetPatchoatExecutable());
- argv.push_back("--input-image-location=" + input_image_location);
- argv.push_back(StringPrintf("--base-offset-delta=0x%jx", (intmax_t) base_offset_delta));
- argv.push_back(StringPrintf("--instruction-set=%s", GetInstructionSetString(kRuntimeISA)));
-
- return argv;
- }
-
- bool RelocateBootImage(const std::string& input_image_location,
- const std::string& output_image_directory,
- off_t base_offset_delta,
- std::string* error_msg) {
- std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
- argv.push_back("--output-image-directory=" + output_image_directory);
-
- return RunDex2OatOrPatchoat(argv, error_msg);
- }
-
- bool VerifyBootImage(const std::string& input_image_location,
- const std::string& output_image_directory,
- off_t base_offset_delta,
- std::string* error_msg) {
- std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
- argv.push_back("--output-image-directory=" + output_image_directory);
- argv.push_back("--verify");
-
- return RunDex2OatOrPatchoat(argv, error_msg);
- }
-
- bool GenerateBootImageRelFile(const std::string& input_image_location,
- const std::string& output_rel_directory,
- off_t base_offset_delta,
- std::string* error_msg) {
- std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
- argv.push_back("--output-image-relocation-directory=" + output_rel_directory);
-
- return RunDex2OatOrPatchoat(argv, error_msg);
- }
-
- bool RunDex2OatOrPatchoat(const std::vector<std::string>& args, std::string* error_msg) {
- int link[2];
-
- if (pipe(link) == -1) {
- return false;
- }
-
- pid_t pid = fork();
- if (pid == -1) {
- return false;
- }
-
- if (pid == 0) {
- // We need dex2oat to actually log things.
- setenv("ANDROID_LOG_TAGS", "*:e", 1);
- dup2(link[1], STDERR_FILENO);
- close(link[0]);
- close(link[1]);
- std::vector<const char*> c_args;
- for (const std::string& str : args) {
- c_args.push_back(str.c_str());
- }
- c_args.push_back(nullptr);
- execv(c_args[0], const_cast<char* const*>(c_args.data()));
- exit(1);
- UNREACHABLE();
- } else {
- close(link[1]);
- char buffer[128];
- memset(buffer, 0, 128);
- ssize_t bytes_read = 0;
-
- while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
- *error_msg += std::string(buffer, bytes_read);
- }
- close(link[0]);
- int status = -1;
- if (waitpid(pid, &status, 0) != -1) {
- return (status == 0);
- }
- return false;
- }
- }
-
- bool CompileBootImageToDir(
- const std::string& output_dir,
- const std::vector<std::string>& dex2oat_extra_args,
- uint32_t base_addr,
- std::string* error_msg) {
- return CompileBootImage(dex2oat_extra_args, output_dir + "/boot", base_addr, error_msg);
- }
-
- bool CopyImageChecksumAndSetPatchDelta(
- const std::string& src_image_filename,
- const std::string& dest_image_filename,
- off_t dest_patch_delta,
- std::string* error_msg) {
- std::unique_ptr<File> src_file(OS::OpenFileForReading(src_image_filename.c_str()));
- if (src_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open source image file %s", src_image_filename.c_str());
- return false;
- }
- ImageHeader src_header;
- if (!src_file->ReadFully(&src_header, sizeof(src_header))) {
- *error_msg = StringPrintf("Failed to read source image file %s", src_image_filename.c_str());
- return false;
- }
-
- std::unique_ptr<File> dest_file(OS::OpenFileReadWrite(dest_image_filename.c_str()));
- if (dest_file.get() == nullptr) {
- *error_msg =
- StringPrintf("Failed to open destination image file %s", dest_image_filename.c_str());
- return false;
- }
- ImageHeader dest_header;
- if (!dest_file->ReadFully(&dest_header, sizeof(dest_header))) {
- *error_msg =
- StringPrintf("Failed to read destination image file %s", dest_image_filename.c_str());
- return false;
- }
- dest_header.SetOatChecksum(src_header.GetOatChecksum());
- dest_header.SetPatchDelta(dest_patch_delta);
- if (!dest_file->ResetOffset()) {
- *error_msg =
- StringPrintf(
- "Failed to seek to start of destination image file %s", dest_image_filename.c_str());
- return false;
- }
- if (!dest_file->WriteFully(&dest_header, sizeof(dest_header))) {
- *error_msg =
- StringPrintf("Failed to write to destination image file %s", dest_image_filename.c_str());
- dest_file->Erase();
- return false;
- }
- if (dest_file->FlushCloseOrErase() != 0) {
- *error_msg =
- StringPrintf(
- "Failed to flush/close destination image file %s", dest_image_filename.c_str());
- return false;
- }
-
- return true;
- }
-
- bool ReadFully(
- const std::string& filename, std::vector<uint8_t>* contents, std::string* error_msg) {
- std::unique_ptr<File> file(OS::OpenFileForReading(filename.c_str()));
- if (file.get() == nullptr) {
- *error_msg = "Failed to open";
- return false;
- }
- int64_t size = file->GetLength();
- if (size < 0) {
- *error_msg = "Failed to get size";
- return false;
- }
- contents->resize(size);
- if (!file->ReadFully(&(*contents)[0], size)) {
- *error_msg = "Failed to read";
- contents->clear();
- return false;
- }
- return true;
- }
-
- bool BinaryDiff(
- const std::string& filename1, const std::string& filename2, std::string* error_msg) {
- std::string read_error_msg;
- std::vector<uint8_t> image1;
- if (!ReadFully(filename1, &image1, &read_error_msg)) {
- *error_msg = StringPrintf("Failed to read %s: %s", filename1.c_str(), read_error_msg.c_str());
- return true;
- }
- std::vector<uint8_t> image2;
- if (!ReadFully(filename2, &image2, &read_error_msg)) {
- *error_msg = StringPrintf("Failed to read %s: %s", filename2.c_str(), read_error_msg.c_str());
- return true;
- }
-    if (image1.size() != image2.size()) {
- *error_msg =
- StringPrintf(
- "%s and %s are of different size: %zu vs %zu",
- filename1.c_str(),
- filename2.c_str(),
- image1.size(),
- image2.size());
- return true;
- }
- size_t size = image1.size();
- for (size_t i = 0; i < size; i++) {
- if (image1[i] != image2[i]) {
- *error_msg =
- StringPrintf("%s and %s differ at offset %zu", filename1.c_str(), filename2.c_str(), i);
- size_t hexdump_size = std::min<size_t>(16u, size - i);
- HexDump dump1(&image1[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
- HexDump dump2(&image2[i], hexdump_size, /* show_actual_addresses */ false, /* prefix */ "");
- std::ostringstream oss;
- oss << "\n" << dump1 << "\n" << dump2;
- *error_msg += oss.str();
- return true;
- }
- }
-
- return false;
- }
-};
-
-TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
-#if defined(ART_USE_READ_BARRIER)
- // This test checks that relocating a boot image using patchoat produces the same result as
- // producing the boot image for that relocated base address using dex2oat. To be precise, these
- // two files will have two small differences: the OAT checksum and base address. However, this
- // test takes this into account.
-
- // Compile boot image into a random directory using dex2oat
- ScratchFile dex2oat_orig_scratch;
- dex2oat_orig_scratch.Unlink();
- std::string dex2oat_orig_dir = dex2oat_orig_scratch.GetFilename();
- ASSERT_EQ(0, mkdir(dex2oat_orig_dir.c_str(), 0700));
- const uint32_t orig_base_addr = 0x60000000;
- // Force deterministic output. We want the boot images created by this dex2oat run and the run
- // below to differ only in their base address.
- std::vector<std::string> dex2oat_extra_args;
- dex2oat_extra_args.push_back("--force-determinism");
- dex2oat_extra_args.push_back("-j1"); // Might not be needed. Causes a 3-5x slowdown.
- std::string error_msg;
- if (!CompileBootImageToDir(dex2oat_orig_dir, dex2oat_extra_args, orig_base_addr, &error_msg)) {
- FAIL() << "CompileBootImage1 failed: " << error_msg;
- }
-
- // Compile a "relocated" boot image into a random directory using dex2oat. This image is relocated
- // in the sense that it uses a different base address.
- ScratchFile dex2oat_reloc_scratch;
- dex2oat_reloc_scratch.Unlink();
- std::string dex2oat_reloc_dir = dex2oat_reloc_scratch.GetFilename();
- ASSERT_EQ(0, mkdir(dex2oat_reloc_dir.c_str(), 0700));
- const uint32_t reloc_base_addr = 0x70000000;
- if (!CompileBootImageToDir(dex2oat_reloc_dir, dex2oat_extra_args, reloc_base_addr, &error_msg)) {
- FAIL() << "CompileBootImage2 failed: " << error_msg;
- }
- const off_t base_addr_delta = reloc_base_addr - orig_base_addr;
-
- // Relocate the original boot image using patchoat. The image is relocated by the same amount
- // as the second/relocated image produced by dex2oat.
- ScratchFile patchoat_scratch;
- patchoat_scratch.Unlink();
- std::string patchoat_dir = patchoat_scratch.GetFilename();
- ASSERT_EQ(0, mkdir(patchoat_dir.c_str(), 0700));
- std::string dex2oat_orig_with_arch_dir =
- dex2oat_orig_dir + "/" + GetInstructionSetString(kRuntimeISA);
- // The arch-including symlink is needed by patchoat
- ASSERT_EQ(0, symlink(dex2oat_orig_dir.c_str(), dex2oat_orig_with_arch_dir.c_str()));
- if (!RelocateBootImage(
- dex2oat_orig_dir + "/boot.art",
- patchoat_dir,
- base_addr_delta,
- &error_msg)) {
- FAIL() << "RelocateBootImage failed: " << error_msg;
- }
-
- // Assert that patchoat created the same set of .art files as dex2oat
- std::vector<std::string> dex2oat_image_basenames;
- std::vector<std::string> patchoat_image_basenames;
- if (!ListDirFilesEndingWith(dex2oat_reloc_dir, ".art", &dex2oat_image_basenames, &error_msg)) {
- FAIL() << "Failed to list *.art files in " << dex2oat_reloc_dir << ": " << error_msg;
- }
- if (!ListDirFilesEndingWith(patchoat_dir, ".art", &patchoat_image_basenames, &error_msg)) {
- FAIL() << "Failed to list *.art files in " << patchoat_dir << ": " << error_msg;
- }
- std::sort(dex2oat_image_basenames.begin(), dex2oat_image_basenames.end());
- std::sort(patchoat_image_basenames.begin(), patchoat_image_basenames.end());
- // .art file names output by patchoat look like tmp@art-data-<random>-<random>@boot*.art. To
- // compare these with .art file names output by dex2oat we retain only the part of the file name
- // after the last @.
- std::vector<std::string> patchoat_image_shortened_basenames(patchoat_image_basenames.size());
- for (size_t i = 0; i < patchoat_image_basenames.size(); i++) {
- patchoat_image_shortened_basenames[i] =
- patchoat_image_basenames[i].substr(patchoat_image_basenames[i].find_last_of("@") + 1);
- }
- ASSERT_EQ(dex2oat_image_basenames, patchoat_image_shortened_basenames);
-
-  // Patch up the dex2oat-relocated image files so that they look as though they were relocated by
-  // patchoat. patchoat preserves the OAT checksum header field and sets the patch delta header field.
- for (const std::string& image_basename : dex2oat_image_basenames) {
- if (!CopyImageChecksumAndSetPatchDelta(
- dex2oat_orig_dir + "/" + image_basename,
- dex2oat_reloc_dir + "/" + image_basename,
- base_addr_delta,
- &error_msg)) {
- FAIL() << "Unable to patch up " << image_basename << ": " << error_msg;
- }
- }
-
- // Assert that the patchoat-relocated images are identical to the dex2oat-relocated images
- for (size_t i = 0; i < dex2oat_image_basenames.size(); i++) {
- const std::string& dex2oat_image_basename = dex2oat_image_basenames[i];
- const std::string& dex2oat_image_filename = dex2oat_reloc_dir + "/" + dex2oat_image_basename;
- const std::string& patchoat_image_filename = patchoat_dir + "/" + patchoat_image_basenames[i];
- if (BinaryDiff(dex2oat_image_filename, patchoat_image_filename, &error_msg)) {
- FAIL() << "patchoat- and dex2oat-relocated variants of " << dex2oat_image_basename
- << " differ: " << error_msg;
- }
- }
-
- ClearDirectory(dex2oat_orig_dir.c_str(), /*recursive*/ true);
- ClearDirectory(dex2oat_reloc_dir.c_str(), /*recursive*/ true);
- ClearDirectory(patchoat_dir.c_str(), /*recursive*/ true);
- rmdir(dex2oat_orig_dir.c_str());
- rmdir(dex2oat_reloc_dir.c_str());
- rmdir(patchoat_dir.c_str());
-#else
- LOG(INFO) << "Skipping PatchoatRelocationSameAsDex2oatRelocation";
- // Force-print to std::cout so it's also outside the logcat.
- std::cout << "Skipping PatchoatRelocationSameAsDex2oatRelocation" << std::endl;
-#endif
-}
-
-// These tests check that a boot image relocated using patchoat can be unrelocated
-// using the .rel file created by patchoat.
-//
-// The tests don't work when heap poisoning is enabled because some of the
-// references are negated. b/72117833 is tracking the effort to have patchoat
-// and its tests support heap poisoning.
-class PatchoatVerificationTest : public PatchoatTest {
- protected:
- void CreateRelocatedBootImage() {
- // Compile boot image into a random directory using dex2oat
- ScratchFile dex2oat_orig_scratch;
- dex2oat_orig_scratch.Unlink();
- dex2oat_orig_dir_ = dex2oat_orig_scratch.GetFilename();
- ASSERT_EQ(0, mkdir(dex2oat_orig_dir_.c_str(), 0700));
- const uint32_t orig_base_addr = 0x60000000;
- std::vector<std::string> dex2oat_extra_args;
- std::string error_msg;
- if (!CompileBootImageToDir(dex2oat_orig_dir_, dex2oat_extra_args, orig_base_addr, &error_msg)) {
- FAIL() << "CompileBootImage1 failed: " << error_msg;
- }
-
- // Generate image relocation file for the original boot image
- std::string dex2oat_orig_with_arch_dir =
- dex2oat_orig_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
- // The arch-including symlink is needed by patchoat
- ASSERT_EQ(0, symlink(dex2oat_orig_dir_.c_str(), dex2oat_orig_with_arch_dir.c_str()));
- base_addr_delta_ = 0x100000;
- if (!GenerateBootImageRelFile(
- dex2oat_orig_dir_ + "/boot.art",
- dex2oat_orig_dir_,
- base_addr_delta_,
- &error_msg)) {
- FAIL() << "RelocateBootImage failed: " << error_msg;
- }
-
- // Relocate the original boot image using patchoat
- ScratchFile relocated_scratch;
- relocated_scratch.Unlink();
- relocated_dir_ = relocated_scratch.GetFilename();
- ASSERT_EQ(0, mkdir(relocated_dir_.c_str(), 0700));
- // Use a different relocation delta from the one used when generating .rel files above. This is
- // to make sure .rel files are not specific to a particular relocation delta.
- base_addr_delta_ -= 0x10000;
- if (!RelocateBootImage(
- dex2oat_orig_dir_ + "/boot.art",
- relocated_dir_,
- base_addr_delta_,
- &error_msg)) {
- FAIL() << "RelocateBootImage failed: " << error_msg;
- }
-
- // Assert that patchoat created the same set of .art and .art.rel files
- std::vector<std::string> rel_basenames;
- std::vector<std::string> relocated_image_basenames;
- if (!ListDirFilesEndingWith(dex2oat_orig_dir_, ".rel", &rel_basenames, &error_msg)) {
- FAIL() << "Failed to list *.art.rel files in " << dex2oat_orig_dir_ << ": " << error_msg;
- }
- if (!ListDirFilesEndingWith(relocated_dir_, ".art", &relocated_image_basenames, &error_msg)) {
- FAIL() << "Failed to list *.art files in " << relocated_dir_ << ": " << error_msg;
- }
- std::sort(rel_basenames.begin(), rel_basenames.end());
- std::sort(relocated_image_basenames.begin(), relocated_image_basenames.end());
-
- // .art and .art.rel file names output by patchoat look like
- // tmp@art-data-<random>-<random>@boot*.art, encoding the name of the directory in their name.
- // To compare these with each other, we retain only the part of the file name after the last @,
- // and we also drop the extension.
- std::vector<std::string> rel_shortened_basenames(rel_basenames.size());
- std::vector<std::string> relocated_image_shortened_basenames(relocated_image_basenames.size());
- for (size_t i = 0; i < rel_basenames.size(); i++) {
- rel_shortened_basenames[i] = rel_basenames[i].substr(rel_basenames[i].find_last_of("@") + 1);
- rel_shortened_basenames[i] =
- rel_shortened_basenames[i].substr(0, rel_shortened_basenames[i].find("."));
- }
- for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
- relocated_image_shortened_basenames[i] =
- relocated_image_basenames[i].substr(relocated_image_basenames[i].find_last_of("@") + 1);
- relocated_image_shortened_basenames[i] =
- relocated_image_shortened_basenames[i].substr(
- 0, relocated_image_shortened_basenames[i].find("."));
- }
- ASSERT_EQ(rel_shortened_basenames, relocated_image_shortened_basenames);
- }
-
- virtual void TearDown() {
- if (!dex2oat_orig_dir_.empty()) {
- ClearDirectory(dex2oat_orig_dir_.c_str(), /*recursive*/ true);
- rmdir(dex2oat_orig_dir_.c_str());
- }
- if (!relocated_dir_.empty()) {
- ClearDirectory(relocated_dir_.c_str(), /*recursive*/ true);
- rmdir(relocated_dir_.c_str());
- }
- PatchoatTest::TearDown();
- }
-
- std::string dex2oat_orig_dir_;
- std::string relocated_dir_;
- off_t base_addr_delta_;
-};
-
-// Assert that verification works with the .rel files.
-TEST_F(PatchoatVerificationTest, Successful) {
- TEST_DISABLED_FOR_HEAP_POISONING();
- CreateRelocatedBootImage();
-
- std::string error_msg;
- if (!VerifyBootImage(
- dex2oat_orig_dir_ + "/boot.art",
- relocated_dir_,
- base_addr_delta_,
- &error_msg)) {
- FAIL() << "VerifyBootImage failed: " << error_msg;
- }
-}
-
-// Corrupt the image file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedImage) {
- TEST_DISABLED_FOR_HEAP_POISONING();
- CreateRelocatedBootImage();
-
- std::string error_msg;
- std::string relocated_image_filename;
- if (!GetDalvikCacheFilename((dex2oat_orig_dir_ + "/boot.art").c_str(),
- relocated_dir_.c_str(),
- &relocated_image_filename,
- &error_msg)) {
- FAIL() << "Failed to find relocated image file name: " << error_msg;
- }
- ASSERT_EQ(truncate(relocated_image_filename.c_str(), sizeof(ImageHeader)), 0)
- << relocated_image_filename;
-
- if (VerifyBootImage(
- dex2oat_orig_dir_ + "/boot.art",
- relocated_dir_,
- base_addr_delta_,
- &error_msg)) {
- FAIL() << "VerifyBootImage should have failed since the image was intentionally corrupted";
- }
-}
-
-// Corrupt the relocation file and check that the verification fails gracefully.
-TEST_F(PatchoatVerificationTest, CorruptedRelFile) {
- TEST_DISABLED_FOR_HEAP_POISONING();
- CreateRelocatedBootImage();
-
- std::string error_msg;
- std::string art_filename = dex2oat_orig_dir_ + "/boot.art";
- std::string rel_filename = dex2oat_orig_dir_ + "/boot.art.rel";
- std::unique_ptr<File> art_file(OS::OpenFileForReading(art_filename.c_str()));
- std::unique_ptr<File> rel_file(OS::OpenFileReadWrite(rel_filename.c_str()));
- rel_file->ClearContent();
- uint8_t buffer[64] = {};
- ASSERT_TRUE(rel_file->WriteFully(&buffer, SHA256_DIGEST_LENGTH));
-  // Encode a single relocation that is just past the end of the image file.
- size_t leb_size = EncodeUnsignedLeb128(buffer, art_file->GetLength()) - buffer;
- ASSERT_TRUE(rel_file->WriteFully(&buffer, leb_size));
- ASSERT_EQ(rel_file->FlushClose(), 0);
- ASSERT_EQ(art_file->Close(), 0);
-
- if (VerifyBootImage(
- dex2oat_orig_dir_ + "/boot.art",
- relocated_dir_,
- base_addr_delta_,
- &error_msg)) {
- FAIL() << "VerifyBootImage should have failed since the rel file was intentionally corrupted";
- }
-}
-
-} // namespace art
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index 6715680..4d8eef9 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -38,7 +38,7 @@
   // Avoid merging classes since we may want to only add classes that fit certain criteria.
   // If we merged the classes, every single class in each profile would be in the out_profile,
   // but we want to only include classes that are in at least a few profiles.
- out_profile->MergeWith(*profile, /*merge_classes*/ false);
+ out_profile->MergeWith(*profile, /*merge_classes=*/ false);
}
// Image classes that were added because they are commonly used.
@@ -96,7 +96,7 @@
is_clean = false;
}
},
- /*instance_fields*/ VoidFunctor(),
+ /*instance_field_visitor=*/ VoidFunctor(),
method_visitor,
method_visitor);
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b509fb4..4dc5262 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -37,7 +37,7 @@
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
+ if (!info.Load(reference_profile_file->Fd(), /*merge_classes=*/ true, filter_fn)) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -49,7 +49,7 @@
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
+ if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes=*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -92,7 +92,7 @@
// Will block until all the locks are acquired.
bool Init(const std::vector<std::string>& filenames, /* out */ std::string* error) {
for (size_t i = 0; i < filenames.size(); i++) {
- flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block */ true, error);
+ flocks_[i] = LockedFile::Open(filenames[i].c_str(), O_RDWR, /* block= */ true, error);
if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
@@ -106,7 +106,7 @@
for (size_t i = 0; i < fds.size(); i++) {
DCHECK_GE(fds[i], 0);
flocks_[i] = LockedFile::DupOf(fds[i], "profile-file",
- true /* read_only_mode */, error);
+ /* read_only_mode= */ true, error);
if (flocks_[i].get() == nullptr) {
*error += " (index=" + std::to_string(i) + ")";
return false;
@@ -138,7 +138,7 @@
// cleared after processing.
ScopedFlock reference_profile_file = LockedFile::DupOf(reference_profile_file_fd,
"reference-profile",
- false /* read_only_mode */,
+ /* read_only_mode= */ false,
&error);
if (reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profiled files: " << error;
@@ -163,7 +163,7 @@
}
ScopedFlock locked_reference_profile_file = LockedFile::Open(
- reference_profile_file.c_str(), O_RDWR, /* block */ true, &error);
+ reference_profile_file.c_str(), O_RDWR, /* block= */ true, &error);
if (locked_reference_profile_file.get() == nullptr) {
LOG(WARNING) << "Could not lock reference profile files: " << error;
return kErrorCannotLock;
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 286b686..31dfbc0 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -116,9 +116,9 @@
void SetupBasicProfile(const std::string& id,
uint32_t checksum,
uint16_t number_of_methods,
- const std::vector<uint32_t> hot_methods,
- const std::vector<uint32_t> startup_methods,
- const std::vector<uint32_t> post_startup_methods,
+ const std::vector<uint32_t>& hot_methods,
+ const std::vector<uint32_t>& startup_methods,
+ const std::vector<uint32_t>& post_startup_methods,
const ScratchFile& profile,
ProfileCompilationInfo* info) {
std::string dex_location = "location1" + id;
@@ -720,7 +720,7 @@
ASSERT_TRUE(info.Load(GetFd(profile_file)));
// Verify that the profile has matching methods.
ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::Class> klass = GetClass(soa, /* class_loader */ nullptr, "Ljava/lang/Math;");
+ ObjPtr<mirror::Class> klass = GetClass(soa, /* class_loader= */ nullptr, "Ljava/lang/Math;");
ASSERT_TRUE(klass != nullptr);
size_t method_count = 0;
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
@@ -932,8 +932,8 @@
AssertInlineCaches(inline_monomorphic,
expected_monomorphic,
info,
- /*megamorphic*/false,
- /*missing_types*/false);
+ /*is_megamorphic=*/false,
+ /*is_missing_types=*/false);
}
{
@@ -949,8 +949,8 @@
AssertInlineCaches(inline_polymorhic,
expected_polymorphic,
info,
- /*megamorphic*/false,
- /*missing_types*/false);
+ /*is_megamorphic=*/false,
+ /*is_missing_types=*/false);
}
{
@@ -963,8 +963,8 @@
AssertInlineCaches(inline_megamorphic,
expected_megamorphic,
info,
- /*megamorphic*/true,
- /*missing_types*/false);
+ /*is_megamorphic=*/true,
+ /*is_missing_types=*/false);
}
{
@@ -977,8 +977,8 @@
AssertInlineCaches(inline_missing_types,
expected_missing_Types,
info,
- /*megamorphic*/false,
- /*missing_types*/true);
+ /*is_megamorphic=*/false,
+ /*is_missing_types=*/true);
}
{
@@ -1005,7 +1005,7 @@
const uint16_t kNumberOfMethodsToEnableCompilation = 100;
ProfileCompilationInfo info1;
SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1,
- /*start_method_index*/0, /*reverse_dex_write_order*/false);
+ /*start_method_index=*/0, /*reverse_dex_write_order=*/false);
// The reference profile info will contain the methods with indices 50-150.
// When setting up the profile reverse the order in which the dex files
@@ -1014,7 +1014,7 @@
const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
ProfileCompilationInfo reference_info;
SetupProfile("p1", 1, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
- &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order*/true);
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order=*/true);
// We should advise compilation.
ASSERT_EQ(ProfileAssistant::kCompile,
@@ -1233,9 +1233,9 @@
ProfileCompilationInfo info2_filter;
ProfileCompilationInfo expected;
- info2_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
- info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
- expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+ info2_filter.Load(profile1.GetFd(), /*merge_classes=*/ true, filter_fn);
+ info2_filter.Load(profile2.GetFd(), /*merge_classes=*/ true, filter_fn);
+ expected.Load(reference_profile.GetFd(), /*merge_classes=*/ true, filter_fn);
ASSERT_TRUE(expected.MergeWith(info1_filter));
ASSERT_TRUE(expected.MergeWith(info2_filter));
@@ -1260,13 +1260,13 @@
"fake-location2",
d2.GetLocationChecksum(),
num_methods_to_add,
- /*num_classes*/ 0,
+ /*number_of_classes=*/ 0,
profile1,
&info1,
- /*start_method_index*/ 0,
- /*reverse_dex_write_order*/ false,
- /*number_of_methods1*/ d1.NumMethodIds(),
- /*number_of_methods2*/ d2.NumMethodIds());
+ /*start_method_index=*/ 0,
+ /*reverse_dex_write_order=*/ false,
+ /*number_of_methods1=*/ d1.NumMethodIds(),
+ /*number_of_methods2=*/ d2.NumMethodIds());
// Run profman and pass the dex file with --apk-fd.
android::base::unique_fd apk_fd(
diff --git a/profman/profman.cc b/profman/profman.cc
index cecd3c2..d989c8c 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -35,6 +35,7 @@
#include "base/logging.h" // For InitLogging.
#include "base/mem_map.h"
#include "base/scoped_flock.h"
+#include "base/stl_util.h"
#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/unix_file/fd_file.h"
@@ -426,7 +427,7 @@
if (use_apk_fd_list) {
if (dex_file_loader.OpenZip(apks_fd_[i],
dex_locations_[i],
- /* verify */ false,
+ /* verify= */ false,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
@@ -437,7 +438,7 @@
} else {
if (dex_file_loader.Open(apk_files_[i].c_str(),
dex_locations_[i],
- /* verify */ false,
+ /* verify= */ false,
kVerifyChecksum,
&error_msg,
&dex_files_for_location)) {
@@ -500,7 +501,7 @@
LOG(ERROR) << "Cannot load profile info from filename=" << filename << " fd=" << fd;
return -1;
}
- *dump += banner + "\n" + info->DumpInfo(dex_files) + "\n";
+ *dump += banner + "\n" + info->DumpInfo(MakeNonOwningPointerVector(*dex_files)) + "\n";
return 0;
}
@@ -513,10 +514,23 @@
static const char* kEmptyString = "";
static const char* kOrdinaryProfile = "=== profile ===";
static const char* kReferenceProfile = "=== reference profile ===";
+ static const char* kDexFiles = "=== Dex files ===";
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
+
std::string dump;
+
+  // Dump dex files and their corresponding checksums.
+ dump += kDexFiles;
+ dump += "\n";
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ std::ostringstream oss;
+ oss << dex_file->GetLocation()
+ << " [checksum=" << std::hex << dex_file->GetLocationChecksum() << "]\n";
+ dump += oss.str();
+ }
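+  // Illustrative sketch of the output format above: each entry is
+  // "<dex location> [checksum=<hex checksum>]", e.g. a line such as
+  // "/system/framework/core-oj.jar [checksum=1234abcd]" (hypothetical values).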
+
// Dump individual profile files.
if (!profile_files_fd_.empty()) {
for (int profile_file_fd : profile_files_fd_) {
@@ -530,12 +544,10 @@
}
}
}
- if (!profile_files_.empty()) {
- for (const std::string& profile_file : profile_files_) {
- int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
- if (ret != 0) {
- return ret;
- }
+ for (const std::string& profile_file : profile_files_) {
+ int ret = DumpOneProfile(kOrdinaryProfile, profile_file, kInvalidFd, &dex_files, &dump);
+ if (ret != 0) {
+ return ret;
}
}
// Dump reference profile file.
@@ -562,7 +574,7 @@
if (!FdIsValid(dump_output_to_fd_)) {
std::cout << dump;
} else {
- unix_file::FdFile out_fd(dump_output_to_fd_, false /*check_usage*/);
+ unix_file::FdFile out_fd(dump_output_to_fd_, /*check_usage=*/ false);
if (!out_fd.WriteFully(dump.c_str(), dump.length())) {
return -1;
}
@@ -688,7 +700,7 @@
if (!FdIsValid(dump_output_to_fd_)) {
std::cout << dump;
} else {
- unix_file::FdFile out_fd(dump_output_to_fd_, false /*check_usage*/);
+ unix_file::FdFile out_fd(dump_output_to_fd_, /*check_usage=*/ false);
if (!out_fd.WriteFully(dump.c_str(), dump.length())) {
return -1;
}
@@ -912,7 +924,7 @@
flags |= ProfileCompilationInfo::MethodHotness::kFlagPostStartup;
}
- TypeReference class_ref(/* dex_file */ nullptr, dex::TypeIndex());
+ TypeReference class_ref(/* dex_file= */ nullptr, dex::TypeIndex());
if (!FindClass(dex_files, klass, &class_ref)) {
LOG(WARNING) << "Could not find class: " << klass;
return false;
@@ -981,7 +993,7 @@
return false;
}
std::vector<TypeReference> classes(inline_cache_elems.size(),
- TypeReference(/* dex_file */ nullptr, dex::TypeIndex()));
+ TypeReference(/* dex_file= */ nullptr, dex::TypeIndex()));
size_t class_it = 0;
for (const std::string& ic_class : inline_cache_elems) {
if (!FindClass(dex_files, ic_class, &(classes[class_it++]))) {
@@ -1201,7 +1213,7 @@
// Do not clear if invalid. The input might be an archive.
bool load_ok = use_fds
? profile.Load(profile_files_fd_[0])
- : profile.Load(profile_files_[0], /*clear_if_invalid*/ false);
+ : profile.Load(profile_files_[0], /*clear_if_invalid=*/ false);
if (load_ok) {
// Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
@@ -1211,7 +1223,7 @@
}
bool result = use_fds
? profile.Save(reference_profile_file_fd_)
- : profile.Save(reference_profile_file_, /*bytes_written*/ nullptr);
+ : profile.Save(reference_profile_file_, /*bytes_written=*/ nullptr);
return result ? 0 : kErrorFailedToSaveProfile;
} else {
return kErrorFailedToLoadProfile;
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 6ec6265..bedeaf7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -93,6 +93,7 @@
"instrumentation.cc",
"intern_table.cc",
"interpreter/interpreter.cc",
+ "interpreter/interpreter_cache.cc",
"interpreter/interpreter_common.cc",
"interpreter/interpreter_intrinsics.cc",
"interpreter/interpreter_switch_impl.cc",
@@ -239,14 +240,11 @@
"entrypoints/quick/quick_trampoline_entrypoints.cc",
],
- // b/77976998, clang lld does not recognize the --keep-unique flag.
- use_clang_lld: false,
-
arch: {
arm: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_arm.S",
+ ":libart_mterp.arm",
"arch/arm/context_arm.cc",
"arch/arm/entrypoints_init_arm.cc",
"arch/arm/instruction_set_features_assembly_tests.S",
@@ -261,7 +259,7 @@
arm64: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_arm64.S",
+ ":libart_mterp.arm64",
"arch/arm64/context_arm64.cc",
"arch/arm64/entrypoints_init_arm64.cc",
"arch/arm64/jni_entrypoints_arm64.S",
@@ -275,7 +273,7 @@
x86: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_x86.S",
+ ":libart_mterp.x86",
"arch/x86/context_x86.cc",
"arch/x86/entrypoints_init_x86.cc",
"arch/x86/jni_entrypoints_x86.S",
@@ -290,7 +288,7 @@
// Note that the fault_handler_x86.cc is not a mistake. This file is
// shared between the x86 and x86_64 architectures.
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_x86_64.S",
+ ":libart_mterp.x86_64",
"arch/x86_64/context_x86_64.cc",
"arch/x86_64/entrypoints_init_x86_64.cc",
"arch/x86_64/jni_entrypoints_x86_64.S",
@@ -304,7 +302,7 @@
mips: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_mips.S",
+ ":libart_mterp.mips",
"arch/mips/context_mips.cc",
"arch/mips/entrypoints_init_mips.cc",
"arch/mips/jni_entrypoints_mips.S",
@@ -317,7 +315,7 @@
mips64: {
srcs: [
"interpreter/mterp/mterp.cc",
- "interpreter/mterp/out/mterp_mips64.S",
+ ":libart_mterp.mips64",
"arch/mips64/context_mips64.cc",
"arch/mips64/entrypoints_init_mips64.cc",
"arch/mips64/jni_entrypoints_mips64.S",
@@ -381,6 +379,7 @@
],
header_libs: [
"art_cmdlineparser_headers",
+ "cpp-define-generator-definitions",
"libnativehelper_header_only",
"jni_platform_headers",
],
@@ -408,6 +407,49 @@
export_shared_lib_headers: ["libbase"],
}
+libart_static_cc_defaults {
+ name: "libart_static_base_defaults",
+ target: {
+ android: {
+ static_libs: ["libtombstoned_client_static"],
+ },
+ },
+ static_libs: [
+ "libbacktrace",
+ "libbase",
+ "libcutils",
+ "liblog",
+ "liblz4",
+ "liblzma",
+ "libnativebridge",
+ "libnativeloader",
+ "libunwindstack",
+ "libz",
+ ],
+}
+
+cc_defaults {
+ name: "libart_static_defaults",
+ defaults: [
+ "libart_static_base_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ ],
+ static_libs: ["libart"],
+}
+
+cc_defaults {
+ name: "libartd_static_defaults",
+ defaults: [
+ "libart_static_base_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ ],
+ static_libs: ["libartd"],
+}
+
gensrcs {
name: "art_operator_srcs",
cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
@@ -626,3 +668,51 @@
host_supported: true,
export_include_dirs: ["."],
}
+
+genrule {
+ name: "libart_mterp.arm",
+ out: ["mterp_arm.S"],
+ srcs: ["interpreter/mterp/arm/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.arm64",
+ out: ["mterp_arm64.S"],
+ srcs: ["interpreter/mterp/arm64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.mips",
+ out: ["mterp_mips.S"],
+ srcs: ["interpreter/mterp/mips/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.mips64",
+ out: ["mterp_mips64.S"],
+ srcs: ["interpreter/mterp/mips64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.x86",
+ out: ["mterp_x86.S"],
+ srcs: ["interpreter/mterp/x86/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
+
+genrule {
+ name: "libart_mterp.x86_64",
+ out: ["mterp_x86_64.S"],
+ srcs: ["interpreter/mterp/x86_64/*.S"],
+ tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
+ cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+}
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index d4dbbf9..12ad84b 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -22,26 +22,6 @@
#include "common_runtime_test.h"
#include "quick/quick_method_frame_info.h"
-// asm_support.h declares tests next to the #defines. We use asm_support_check.h to (safely)
-// generate CheckAsmSupportOffsetsAndSizes using gtest's EXPECT for the tests. We also use the
-// RETURN_TYPE, HEADER and FOOTER defines from asm_support_check.h to try to ensure that any
-// tests are actually generated.
-
-// Let CheckAsmSupportOffsetsAndSizes return a size_t (the count).
-#define ASM_SUPPORT_CHECK_RETURN_TYPE size_t
-
-// Declare the counter that will be updated per test.
-#define ASM_SUPPORT_CHECK_HEADER size_t count = 0;
-
-// Use EXPECT_EQ for tests, and increment the counter.
-#define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y); count++;
-
-// Return the counter at the end of CheckAsmSupportOffsetsAndSizes.
-#define ASM_SUPPORT_CHECK_FOOTER return count;
-
-// Generate CheckAsmSupportOffsetsAndSizes().
-#include "asm_support_check.h"
-
namespace art {
class ArchTest : public CommonRuntimeTest {
@@ -60,11 +40,6 @@
}
};
-TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
- size_t test_count = CheckAsmSupportOffsetsAndSizes();
- EXPECT_GT(test_count, 0u);
-}
-
// Grab architecture specific constants.
namespace arm {
#include "arch/arm/asm_support_arm.h"
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index 608999b..e97d2cb 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -51,6 +51,7 @@
"cortex-a72",
"cortex-a73",
"cortex-a75",
+ "cortex-a76",
"exynos-m1",
"denver",
"kryo"
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index d0f61c9..7796ca7 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -16,6 +16,11 @@
#include "instruction_set_features_arm64.h"
+#if defined(ART_TARGET_ANDROID) && defined(__aarch64__)
+#include <asm/hwcap.h>
+#include <sys/auxv.h>
+#endif
+
#include <fstream>
#include <sstream>
@@ -31,6 +36,10 @@
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg) {
+  // The CPU variant string is passed to ART through the --instruction-set-variant option.
+  // During the build, this setting comes from TARGET_CPU_VARIANT in the device BoardConfig.mk, for example:
+ // TARGET_CPU_VARIANT := cortex-a75
+
// Look for variants that need a fix for a53 erratum 835769.
static const char* arm64_variants_with_a53_835769_bug[] = {
// Pessimistically assume all generic CPUs are cortex-a53.
@@ -39,14 +48,70 @@
"cortex-a53",
"cortex-a53.a57",
"cortex-a53.a72",
- // Pessimistically assume all "big" cortex CPUs are paired with a cortex-a53.
+      // Pessimistically assume the following "big" cortex CPUs are paired with a cortex-a53.
"cortex-a57",
"cortex-a72",
"cortex-a73",
};
+
+ static const char* arm64_variants_with_crc[] = {
+ "default",
+ "generic",
+ "kryo",
+ "exynos-m1",
+ "exynos-m2",
+ "exynos-m3",
+ "cortex-a35",
+ "cortex-a53",
+ "cortex-a53.a57",
+ "cortex-a53.a72",
+ "cortex-a57",
+ "cortex-a72",
+ "cortex-a73",
+ "cortex-a55",
+ "cortex-a75",
+ "cortex-a76",
+ };
+
+ static const char* arm64_variants_with_lse[] = {
+ "cortex-a55",
+ "cortex-a75",
+ "cortex-a76",
+ };
+
+ static const char* arm64_variants_with_fp16[] = {
+ "cortex-a55",
+ "cortex-a75",
+ "cortex-a76",
+ };
+
+ static const char* arm64_variants_with_dotprod[] = {
+ "cortex-a55",
+ "cortex-a75",
+ "cortex-a76",
+ };
+
bool needs_a53_835769_fix = FindVariantInArray(arm64_variants_with_a53_835769_bug,
arraysize(arm64_variants_with_a53_835769_bug),
variant);
+ // The variants that need a fix for 843419 are the same that need a fix for 835769.
+ bool needs_a53_843419_fix = needs_a53_835769_fix;
+
+ bool has_crc = FindVariantInArray(arm64_variants_with_crc,
+ arraysize(arm64_variants_with_crc),
+ variant);
+
+ bool has_lse = FindVariantInArray(arm64_variants_with_lse,
+ arraysize(arm64_variants_with_lse),
+ variant);
+
+ bool has_fp16 = FindVariantInArray(arm64_variants_with_fp16,
+ arraysize(arm64_variants_with_fp16),
+ variant);
+
+ bool has_dotprod = FindVariantInArray(arm64_variants_with_dotprod,
+ arraysize(arm64_variants_with_dotprod),
+ variant);
if (!needs_a53_835769_fix) {
// Check to see if this is an expected variant.
@@ -54,6 +119,7 @@
"cortex-a35",
"cortex-a55",
"cortex-a75",
+ "cortex-a76",
"exynos-m1",
"exynos-m2",
"exynos-m3",
@@ -68,31 +134,91 @@
}
}
- // The variants that need a fix for 843419 are the same that need a fix for 835769.
- bool needs_a53_843419_fix = needs_a53_835769_fix;
-
- return Arm64FeaturesUniquePtr(
- new Arm64InstructionSetFeatures(needs_a53_835769_fix, needs_a53_843419_fix));
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+ needs_a53_843419_fix,
+ has_crc,
+ has_lse,
+ has_fp16,
+ has_dotprod));
}
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool is_a53 = (bitmap & kA53Bitfield) != 0;
- return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+ bool has_crc = (bitmap & kCRCBitField) != 0;
+ bool has_lse = (bitmap & kLSEBitField) != 0;
+ bool has_fp16 = (bitmap & kFP16BitField) != 0;
+ bool has_dotprod = (bitmap & kDotProdBitField) != 0;
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53,
+ is_a53,
+ has_crc,
+ has_lse,
+ has_fp16,
+ has_dotprod));
}
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
- const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+ // For more details about ARM feature macros, refer to
+ // Arm C Language Extensions Documentation (ACLE).
+ // https://developer.arm.com/docs/101028/latest
+ bool needs_a53_835769_fix = false;
+ bool needs_a53_843419_fix = needs_a53_835769_fix;
+ bool has_crc = false;
+ bool has_lse = false;
+ bool has_fp16 = false;
+ bool has_dotprod = false;
+
+#if defined (__ARM_FEATURE_CRC32)
+ has_crc = true;
+#endif
+
+#if defined (__ARM_ARCH_8_1A__) || defined (__ARM_ARCH_8_2A__)
+ // There is no specific ACLE macro defined for ARMv8.1 LSE features.
+ has_lse = true;
+#endif
+
+#if defined (__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) || defined (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ has_fp16 = true;
+#endif
+
+#if defined (__ARM_FEATURE_DOTPROD)
+ has_dotprod = true;
+#endif
+
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+ needs_a53_843419_fix,
+ has_crc,
+ has_lse,
+ has_fp16,
+ has_dotprod));
}
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
- const bool is_a53 = true; // Conservative default.
- return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+ UNIMPLEMENTED(WARNING);
+ return FromCppDefines();
}
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromHwcap() {
- const bool is_a53 = true; // Pessimistically assume all ARM64s are A53s.
- return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53, is_a53));
+ bool needs_a53_835769_fix = false; // No HWCAP for this.
+ bool needs_a53_843419_fix = false; // No HWCAP for this.
+ bool has_crc = false;
+ bool has_lse = false;
+ bool has_fp16 = false;
+ bool has_dotprod = false;
+
+#if defined(ART_TARGET_ANDROID) && defined(__aarch64__)
+ uint64_t hwcaps = getauxval(AT_HWCAP);
+ has_crc = hwcaps & HWCAP_CRC32 ? true : false;
+ has_lse = hwcaps & HWCAP_ATOMICS ? true : false;
+ has_fp16 = hwcaps & HWCAP_FPHP ? true : false;
+ has_dotprod = hwcaps & HWCAP_ASIMDDP ? true : false;
+#endif
+
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
+ needs_a53_843419_fix,
+ has_crc,
+ has_lse,
+ has_fp16,
+ has_dotprod));
}
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
@@ -106,11 +232,33 @@
}
const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
return fix_cortex_a53_835769_ == other_as_arm64->fix_cortex_a53_835769_ &&
- fix_cortex_a53_843419_ == other_as_arm64->fix_cortex_a53_843419_;
+ fix_cortex_a53_843419_ == other_as_arm64->fix_cortex_a53_843419_ &&
+ has_crc_ == other_as_arm64->has_crc_ &&
+ has_lse_ == other_as_arm64->has_lse_ &&
+ has_fp16_ == other_as_arm64->has_fp16_ &&
+ has_dotprod_ == other_as_arm64->has_dotprod_;
+}
+
+bool Arm64InstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
+ if (InstructionSet::kArm64 != other->GetInstructionSet()) {
+ return false;
+ }
+  // Currently the 'default' variant is cortex-a53 with the 835769 and 843419 fixes.
+  // Newer CPUs are not required to have these workarounds,
+  // so the two a53 fix features are not tested by HasAtLeast.
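+  // Illustrative example based on the variant tables in FromVariant(): cortex-a75
+  // (crc, lse, fp16, dotprod) has at least the features of the "generic" variant
+  // (crc only), while the reverse HasAtLeast() check returns false.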
+ const Arm64InstructionSetFeatures* other_as_arm64 = other->AsArm64InstructionSetFeatures();
+ return (has_crc_ || !other_as_arm64->has_crc_)
+ && (has_lse_ || !other_as_arm64->has_lse_)
+ && (has_fp16_ || !other_as_arm64->has_fp16_)
+ && (has_dotprod_ || !other_as_arm64->has_dotprod_);
}
uint32_t Arm64InstructionSetFeatures::AsBitmap() const {
- return (fix_cortex_a53_835769_ ? kA53Bitfield : 0);
+ return (fix_cortex_a53_835769_ ? kA53Bitfield : 0)
+ | (has_crc_ ? kCRCBitField : 0)
+      | (has_lse_ ? kLSEBitField : 0)
+      | (has_fp16_ ? kFP16BitField : 0)
+ | (has_dotprod_ ? kDotProdBitField : 0);
}
std::string Arm64InstructionSetFeatures::GetFeatureString() const {
@@ -120,26 +268,100 @@
} else {
result += "-a53";
}
+ if (has_crc_) {
+ result += ",crc";
+ } else {
+ result += ",-crc";
+ }
+ if (has_lse_) {
+ result += ",lse";
+ } else {
+ result += ",-lse";
+ }
+ if (has_fp16_) {
+ result += ",fp16";
+ } else {
+ result += ",-fp16";
+ }
+ if (has_dotprod_) {
+ result += ",dotprod";
+ } else {
+ result += ",-dotprod";
+ }
return result;
}
std::unique_ptr<const InstructionSetFeatures>
Arm64InstructionSetFeatures::AddFeaturesFromSplitString(
const std::vector<std::string>& features, std::string* error_msg) const {
+ // This 'features' string is from '--instruction-set-features=' option in ART.
+ // These ARMv8.x feature strings align with those introduced in other compilers:
+ // https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html
+  // Users can also use armv8.x-a to select a group of features:
+ // armv8.1-a is equivalent to crc,lse
+ // armv8.2-a is equivalent to crc,lse,fp16
+ // armv8.3-a is equivalent to crc,lse,fp16
+ // armv8.4-a is equivalent to crc,lse,fp16,dotprod
+  // For details of optional and mandatory feature support in armv8.x-a,
+ // please refer to section 'A1.7 ARMv8 architecture extensions' in
+ // ARM Architecture Reference Manual ARMv8 document:
+ // https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs/ddi0487/latest/
+ // arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile/
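+  // Illustrative sketch of the parsing below: "--instruction-set-features=armv8.1-a,fp16"
+  // enables crc, lse and fp16, while dotprod keeps the value inherited from this object.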
bool is_a53 = fix_cortex_a53_835769_;
+ bool has_crc = has_crc_;
+ bool has_lse = has_lse_;
+ bool has_fp16 = has_fp16_;
+ bool has_dotprod = has_dotprod_;
for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = android::base::Trim(*i);
if (feature == "a53") {
is_a53 = true;
} else if (feature == "-a53") {
is_a53 = false;
+ } else if (feature == "crc") {
+ has_crc = true;
+ } else if (feature == "-crc") {
+ has_crc = false;
+ } else if (feature == "lse") {
+ has_lse = true;
+ } else if (feature == "-lse") {
+ has_lse = false;
+ } else if (feature == "fp16") {
+ has_fp16 = true;
+ } else if (feature == "-fp16") {
+ has_fp16 = false;
+ } else if (feature == "dotprod") {
+ has_dotprod = true;
+ } else if (feature == "-dotprod") {
+ has_dotprod = false;
+ } else if (feature == "armv8.1-a") {
+ has_crc = true;
+ has_lse = true;
+ } else if (feature == "armv8.2-a") {
+ has_crc = true;
+ has_lse = true;
+ has_fp16 = true;
+ } else if (feature == "armv8.3-a") {
+ has_crc = true;
+ has_lse = true;
+ has_fp16 = true;
+ } else if (feature == "armv8.4-a") {
+ has_crc = true;
+ has_lse = true;
+ has_fp16 = true;
+ has_dotprod = true;
} else {
*error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
return nullptr;
}
}
return std::unique_ptr<const InstructionSetFeatures>(
- new Arm64InstructionSetFeatures(is_a53, is_a53));
+ new Arm64InstructionSetFeatures(is_a53, // erratum 835769
+ is_a53, // erratum 843419
+ has_crc,
+ has_lse,
+ has_fp16,
+ has_dotprod));
}
} // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 163a2d8..4ec8fa2 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -49,6 +49,11 @@
bool Equals(const InstructionSetFeatures* other) const override;
+  // Note that newer CPUs do not have a53 errata 835769 and 843419,
+ // so the two a53 fix features (fix_cortex_a53_835769 and fix_cortex_a53_843419)
+ // are not tested for HasAtLeast.
+ bool HasAtLeast(const InstructionSetFeatures* other) const override;
+
InstructionSet GetInstructionSet() const override {
return InstructionSet::kArm64;
}
@@ -68,6 +73,23 @@
return fix_cortex_a53_843419_;
}
+ bool HasCRC() const {
+ return has_crc_;
+ }
+
+ bool HasLSE() const {
+ return has_lse_;
+ }
+
+ bool HasFP16() const {
+ return has_fp16_;
+ }
+
+ // Are Dot Product instructions (UDOT/SDOT) available?
+ bool HasDotProd() const {
+ return has_dotprod_;
+ }
+
virtual ~Arm64InstructionSetFeatures() {}
protected:
@@ -77,19 +99,36 @@
std::string* error_msg) const override;
private:
- Arm64InstructionSetFeatures(bool needs_a53_835769_fix, bool needs_a53_843419_fix)
+ Arm64InstructionSetFeatures(bool needs_a53_835769_fix,
+ bool needs_a53_843419_fix,
+ bool has_crc,
+ bool has_lse,
+ bool has_fp16,
+ bool has_dotprod)
: InstructionSetFeatures(),
fix_cortex_a53_835769_(needs_a53_835769_fix),
- fix_cortex_a53_843419_(needs_a53_843419_fix) {
+ fix_cortex_a53_843419_(needs_a53_843419_fix),
+ has_crc_(has_crc),
+ has_lse_(has_lse),
+ has_fp16_(has_fp16),
+ has_dotprod_(has_dotprod) {
}
// Bitmap positions for encoding features as a bitmap.
enum {
kA53Bitfield = 1 << 0,
+ kCRCBitField = 1 << 1,
+ kLSEBitField = 1 << 2,
+ kFP16BitField = 1 << 3,
+ kDotProdBitField = 1 << 4,
};
const bool fix_cortex_a53_835769_;
const bool fix_cortex_a53_843419_;
+ const bool has_crc_; // optional in ARMv8.0, mandatory in ARMv8.1.
+ const bool has_lse_; // ARMv8.1 Large System Extensions.
+ const bool has_fp16_; // ARMv8.2 FP16 extensions.
+ const bool has_dotprod_; // optional in ARMv8.2, mandatory in ARMv8.4.
DISALLOW_COPY_AND_ASSIGN(Arm64InstructionSetFeatures);
};
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index b946f4f..99d6b0d 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -28,32 +28,37 @@
ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
EXPECT_EQ(arm64_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
- EXPECT_STREQ("a53", arm64_features->GetFeatureString().c_str());
- EXPECT_EQ(arm64_features->AsBitmap(), 1U);
+ EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", arm64_features->GetFeatureString().c_str());
+ EXPECT_EQ(arm64_features->AsBitmap(), 3U);
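+  // Note (illustrative): the bitmap bit positions are a53=1, crc=2, lse=4, fp16=8 and
+  // dotprod=16 (see instruction_set_features_arm64.h), so "a53,crc" encodes as 1 | 2 = 3
+  // and "crc,lse,fp16,dotprod" below encodes as 2 | 4 | 8 | 16 = 30.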
std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a57", &error_msg));
ASSERT_TRUE(cortex_a57_features.get() != nullptr) << error_msg;
EXPECT_EQ(cortex_a57_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
- EXPECT_STREQ("a53", cortex_a57_features->GetFeatureString().c_str());
- EXPECT_EQ(cortex_a57_features->AsBitmap(), 1U);
+ EXPECT_TRUE(cortex_a57_features->HasAtLeast(arm64_features.get()));
+ EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a57_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a57_features->AsBitmap(), 3U);
std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a73", &error_msg));
ASSERT_TRUE(cortex_a73_features.get() != nullptr) << error_msg;
EXPECT_EQ(cortex_a73_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a73_features->Equals(cortex_a73_features.get()));
- EXPECT_STREQ("a53", cortex_a73_features->GetFeatureString().c_str());
- EXPECT_EQ(cortex_a73_features->AsBitmap(), 1U);
+ EXPECT_TRUE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasCRC());
+ EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a73_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a73_features->AsBitmap(), 3U);
std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a35", &error_msg));
ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
EXPECT_EQ(cortex_a35_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
- EXPECT_STREQ("-a53", cortex_a35_features->GetFeatureString().c_str());
- EXPECT_EQ(cortex_a35_features->AsBitmap(), 0U);
+ EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", cortex_a35_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a35_features->AsBitmap(), 2U);
std::unique_ptr<const InstructionSetFeatures> kryo_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "kryo", &error_msg));
@@ -62,28 +67,107 @@
EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
- EXPECT_STREQ("-a53", kryo_features->GetFeatureString().c_str());
- EXPECT_EQ(kryo_features->AsBitmap(), 0U);
+ EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 2U);
std::unique_ptr<const InstructionSetFeatures> cortex_a55_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a55", &error_msg));
ASSERT_TRUE(cortex_a55_features.get() != nullptr) << error_msg;
EXPECT_EQ(cortex_a55_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a55_features->Equals(cortex_a55_features.get()));
- EXPECT_TRUE(cortex_a55_features->Equals(cortex_a35_features.get()));
+ EXPECT_FALSE(cortex_a55_features->Equals(cortex_a35_features.get()));
EXPECT_FALSE(cortex_a55_features->Equals(cortex_a57_features.get()));
- EXPECT_STREQ("-a53", cortex_a55_features->GetFeatureString().c_str());
- EXPECT_EQ(cortex_a55_features->AsBitmap(), 0U);
+ EXPECT_TRUE(cortex_a35_features->HasAtLeast(arm64_features.get()));
+ EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a55_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a55_features->AsBitmap(), 30U);
std::unique_ptr<const InstructionSetFeatures> cortex_a75_features(
InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a75", &error_msg));
ASSERT_TRUE(cortex_a75_features.get() != nullptr) << error_msg;
EXPECT_EQ(cortex_a75_features->GetInstructionSet(), InstructionSet::kArm64);
EXPECT_TRUE(cortex_a75_features->Equals(cortex_a75_features.get()));
- EXPECT_TRUE(cortex_a75_features->Equals(cortex_a35_features.get()));
+ EXPECT_FALSE(cortex_a75_features->Equals(cortex_a35_features.get()));
EXPECT_FALSE(cortex_a75_features->Equals(cortex_a57_features.get()));
- EXPECT_STREQ("-a53", cortex_a75_features->GetFeatureString().c_str());
- EXPECT_EQ(cortex_a75_features->AsBitmap(), 0U);
+ EXPECT_TRUE(cortex_a75_features->HasAtLeast(arm64_features.get()));
+ EXPECT_TRUE(cortex_a75_features->HasAtLeast(cortex_a55_features.get()));
+ EXPECT_FALSE(cortex_a35_features->HasAtLeast(cortex_a75_features.get()));
+ EXPECT_FALSE(cortex_a75_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+ EXPECT_FALSE(cortex_a75_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+ EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasCRC());
+ EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a75_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a75_features->AsBitmap(), 30U);
+
+ std::unique_ptr<const InstructionSetFeatures> cortex_a76_features(
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a76", &error_msg));
+ ASSERT_TRUE(cortex_a76_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(cortex_a76_features->GetInstructionSet(), InstructionSet::kArm64);
+ EXPECT_TRUE(cortex_a76_features->Equals(cortex_a76_features.get()));
+ EXPECT_FALSE(cortex_a76_features->Equals(cortex_a35_features.get()));
+ EXPECT_FALSE(cortex_a76_features->Equals(cortex_a57_features.get()));
+ EXPECT_TRUE(cortex_a76_features->Equals(cortex_a75_features.get()));
+ EXPECT_TRUE(cortex_a76_features->HasAtLeast(arm64_features.get()));
+ EXPECT_TRUE(cortex_a76_features->HasAtLeast(cortex_a55_features.get()));
+ EXPECT_FALSE(cortex_a35_features->HasAtLeast(cortex_a76_features.get()));
+ EXPECT_FALSE(cortex_a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+ EXPECT_FALSE(cortex_a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+ EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasCRC());
+ EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a76_features->GetFeatureString().c_str());
+ EXPECT_EQ(cortex_a76_features->AsBitmap(), 30U);
+}
+
+TEST(Arm64InstructionSetFeaturesTest, Arm64AddFeaturesFromString) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> base_features(
+ InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "generic", &error_msg));
+ ASSERT_TRUE(base_features.get() != nullptr) << error_msg;
+
+ // Build features for a Cortex-A76 processor (with ARMv8.2 and Dot Product extension support).
+ std::unique_ptr<const InstructionSetFeatures> a76_features(
+ base_features->AddFeaturesFromString("-a53,armv8.2-a,dotprod", &error_msg));
+ ASSERT_TRUE(a76_features.get() != nullptr) << error_msg;
+ ASSERT_EQ(a76_features->GetInstructionSet(), InstructionSet::kArm64);
+ EXPECT_TRUE(a76_features->Equals(a76_features.get()));
+ EXPECT_FALSE(a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+ EXPECT_FALSE(a76_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+ EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasCRC());
+ EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", a76_features->GetFeatureString().c_str());
+ EXPECT_EQ(a76_features->AsBitmap(), 30U);
+
+ // Build features for a default ARM64 processor.
+ std::unique_ptr<const InstructionSetFeatures> generic_features(
+ base_features->AddFeaturesFromString("default", &error_msg));
+ ASSERT_TRUE(generic_features.get() != nullptr) << error_msg;
+ ASSERT_EQ(generic_features->GetInstructionSet(), InstructionSet::kArm64);
+ EXPECT_TRUE(generic_features->Equals(generic_features.get()));
+ EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", generic_features->GetFeatureString().c_str());
+ EXPECT_EQ(generic_features->AsBitmap(), 3U);
+
+ // Build features for an ARM64 processor that supports up to ARMv8.2.
+ std::unique_ptr<const InstructionSetFeatures> armv8_2a_cpu_features(
+ base_features->AddFeaturesFromString("-a53,armv8.2-a", &error_msg));
+ ASSERT_TRUE(armv8_2a_cpu_features.get() != nullptr) << error_msg;
+ ASSERT_EQ(armv8_2a_cpu_features->GetInstructionSet(), InstructionSet::kArm64);
+ EXPECT_TRUE(armv8_2a_cpu_features->Equals(armv8_2a_cpu_features.get()));
+ EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_835769());
+ EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->NeedFixCortexA53_843419());
+ EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasCRC());
+ EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasLSE());
+ EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasFP16());
+ EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasDotProd());
+ EXPECT_STREQ("-a53,crc,lse,fp16,-dotprod", armv8_2a_cpu_features->GetFeatureString().c_str());
+ EXPECT_EQ(armv8_2a_cpu_features->AsBitmap(), 14U);
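+
+ // A brief note on the AsBitmap() expectations in these tests (inferred from the
+ // feature strings rather than from any spec): the values are consistent with a
+ // one-bit-per-feature encoding in which the a53 erratum fixup is bit 0 and crc is
+ // bit 1, so "a53,crc" yields 3, "-a53,crc" yields 2, "-a53,crc,lse,fp16,-dotprod"
+ // yields 14, and "-a53,crc,lse,fp16,dotprod" yields 30.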
}
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 48ddc69..18ddcc0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -103,7 +103,7 @@
bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DeclaringClassOffset() {
+ static constexpr MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
@@ -118,7 +118,7 @@
access_flags_.store(new_access_flags, std::memory_order_relaxed);
}
- static MemberOffset AccessFlagsOffset() {
+ static constexpr MemberOffset AccessFlagsOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, access_flags_));
}
@@ -351,11 +351,11 @@
method_index_ = new_method_index;
}
- static MemberOffset DexMethodIndexOffset() {
+ static constexpr MemberOffset DexMethodIndexOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, dex_method_index_));
}
- static MemberOffset MethodIndexOffset() {
+ static constexpr MemberOffset MethodIndexOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
}
@@ -431,16 +431,16 @@
void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DataOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
PtrSizedFields, data_) / sizeof(void*) * static_cast<size_t>(pointer_size));
}
- static MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset EntryPointFromJniOffset(PointerSize pointer_size) {
return DataOffset(pointer_size);
}
- static MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset EntryPointFromQuickCompiledCodeOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*)
* static_cast<size_t>(pointer_size));
@@ -652,7 +652,7 @@
return hotness_count_;
}
- static MemberOffset HotnessCountOffset() {
+ static constexpr MemberOffset HotnessCountOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
}
@@ -725,6 +725,10 @@
ALWAYS_INLINE CodeItemDebugInfoAccessor DexInstructionDebugInfo()
REQUIRES_SHARED(Locks::mutator_lock_);
+ GcRoot<mirror::Class>& DeclaringClassRoot() {
+ return declaring_class_;
+ }
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index e65c194..eac9856 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -20,207 +20,7 @@
#include "heap_poisoning.h"
#include "read_barrier_config.h"
-// To generate tests related to the constants in this header, either define ADD_TEST_EQ before
-// including, or use asm_support_check.h.
-#ifndef ADD_TEST_EQ // Allow #include-r to replace with their own.
-#define DEFINED_ADD_TEST_EQ 1
-#define ADD_TEST_EQ(x, y)
-#endif
-
-#if defined(__LP64__)
-#define POINTER_SIZE_SHIFT 3
-#define POINTER_SIZE art::PointerSize::k64
-#else
-#define POINTER_SIZE_SHIFT 2
-#define POINTER_SIZE art::PointerSize::k32
-#endif
-ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
- static_cast<size_t>(__SIZEOF_POINTER__))
-
-// Import platform-independent constant defines from our autogenerated list.
-// Export new defines (for assembly use) by editing cpp-define-generator def files.
-#define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "asm_support_gen.h"
-#undef DEFINE_CHECK_EQ
-
-// Offset of field Thread::tlsPtr_.exception.
-#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_EXCEPTION_OFFSET,
- art::Thread::ExceptionOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.managed_stack.top_quick_frame_.
-#define THREAD_TOP_QUICK_FRAME_OFFSET (THREAD_CARD_TABLE_OFFSET + (3 * __SIZEOF_POINTER__))
-ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
- art::Thread::TopOfManagedStackOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.self.
-#define THREAD_SELF_OFFSET (THREAD_CARD_TABLE_OFFSET + (9 * __SIZEOF_POINTER__))
-ADD_TEST_EQ(THREAD_SELF_OFFSET,
- art::Thread::SelfOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 34 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
- art::Thread::ThreadLocalPosOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_end.
-#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
- art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
- art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
-
-// Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET \
- (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 166) * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
- art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.mterp_default_ibase.
-#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_CURRENT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
- art::Thread::MterpDefaultIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
-#define THREAD_ALT_IBASE_OFFSET (THREAD_DEFAULT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
- art::Thread::MterpAltIBaseOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_ALT_IBASE_OFFSET + __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
- art::Thread::RosAllocRunsOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
-#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 16 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
- art::Thread::ThreadLocalAllocStackTopOffset<POINTER_SIZE>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
-#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 17 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
- art::Thread::ThreadLocalAllocStackEndOffset<POINTER_SIZE>().Int32Value())
-
-// Offsets within ShadowFrame.
-#define SHADOWFRAME_LINK_OFFSET 0
-ADD_TEST_EQ(SHADOWFRAME_LINK_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::LinkOffset()))
-#define SHADOWFRAME_METHOD_OFFSET (SHADOWFRAME_LINK_OFFSET + 1 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_METHOD_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::MethodOffset()))
-#define SHADOWFRAME_RESULT_REGISTER_OFFSET (SHADOWFRAME_LINK_OFFSET + 2 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_RESULT_REGISTER_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::ResultRegisterOffset()))
-#define SHADOWFRAME_DEX_PC_PTR_OFFSET (SHADOWFRAME_LINK_OFFSET + 3 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_DEX_PC_PTR_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::DexPCPtrOffset()))
-#define SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET (SHADOWFRAME_LINK_OFFSET + 4 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::DexInstructionsOffset()))
-#define SHADOWFRAME_LOCK_COUNT_DATA_OFFSET (SHADOWFRAME_LINK_OFFSET + 5 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::LockCountDataOffset()))
-#define SHADOWFRAME_NUMBER_OF_VREGS_OFFSET (SHADOWFRAME_LINK_OFFSET + 6 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::NumberOfVRegsOffset()))
-#define SHADOWFRAME_DEX_PC_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 4)
-ADD_TEST_EQ(SHADOWFRAME_DEX_PC_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::DexPCOffset()))
-#define SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 8)
-ADD_TEST_EQ(SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::CachedHotnessCountdownOffset()))
-#define SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 10)
-ADD_TEST_EQ(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::HotnessCountdownOffset()))
-#define SHADOWFRAME_VREGS_OFFSET (SHADOWFRAME_NUMBER_OF_VREGS_OFFSET + 16)
-ADD_TEST_EQ(SHADOWFRAME_VREGS_OFFSET,
- static_cast<int32_t>(art::ShadowFrame::VRegsOffset()))
-
-#if defined(USE_BROOKS_READ_BARRIER)
-#define MIRROR_OBJECT_HEADER_SIZE 16
-#else
-#define MIRROR_OBJECT_HEADER_SIZE 8
-#endif
-ADD_TEST_EQ(size_t(MIRROR_OBJECT_HEADER_SIZE), sizeof(art::mirror::Object))
-
-// Offsets within java.lang.Class.
-#define MIRROR_CLASS_COMPONENT_TYPE_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
- art::mirror::Class::ComponentTypeOffset().Int32Value())
-#define MIRROR_CLASS_IF_TABLE_OFFSET (16 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_IF_TABLE_OFFSET,
- art::mirror::Class::IfTableOffset().Int32Value())
-#define MIRROR_CLASS_ACCESS_FLAGS_OFFSET (56 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
- art::mirror::Class::AccessFlagsOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (88 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
- art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET (92 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
- art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
- art::mirror::Class::PrimitiveTypeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
- art::mirror::Class::StatusOffset().Int32Value())
-
-#define PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT 16
-ADD_TEST_EQ(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
- static_cast<int>(art::mirror::Class::kPrimitiveTypeSizeShiftShift))
-
-// Array offsets.
-#define MIRROR_ARRAY_LENGTH_OFFSET MIRROR_OBJECT_HEADER_SIZE
-ADD_TEST_EQ(MIRROR_ARRAY_LENGTH_OFFSET, art::mirror::Array::LengthOffset().Int32Value())
-
-#define MIRROR_CHAR_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_CHAR_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
-
-#define MIRROR_BOOLEAN_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
-
-#define MIRROR_BYTE_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_BYTE_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
-
-#define MIRROR_SHORT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_SHORT_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
-
-#define MIRROR_INT_ARRAY_DATA_OFFSET MIRROR_CHAR_ARRAY_DATA_OFFSET
-ADD_TEST_EQ(MIRROR_INT_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
-
-#define MIRROR_WIDE_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_WIDE_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
-
-#define MIRROR_OBJECT_ARRAY_DATA_OFFSET (4 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(
- sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
-
-#define MIRROR_OBJECT_ARRAY_COMPONENT_SIZE 4
-ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE),
- sizeof(art::mirror::HeapReference<art::mirror::Object>))
-
-#define MIRROR_LONG_ARRAY_DATA_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_LONG_ARRAY_DATA_OFFSET,
- art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
-
-// Offsets within java.lang.String.
-#define MIRROR_STRING_COUNT_OFFSET MIRROR_OBJECT_HEADER_SIZE
-ADD_TEST_EQ(MIRROR_STRING_COUNT_OFFSET, art::mirror::String::CountOffset().Int32Value())
-
-#define MIRROR_STRING_VALUE_OFFSET (8 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_STRING_VALUE_OFFSET, art::mirror::String::ValueOffset().Int32Value())
-
-// String compression feature.
-#define STRING_COMPRESSION_FEATURE 1
-ADD_TEST_EQ(STRING_COMPRESSION_FEATURE, art::mirror::kUseStringCompression);
-
-#ifdef DEFINED_ADD_TEST_EQ
-#undef ADD_TEST_EQ
-#undef DEFINED_ADD_TEST_EQ
-#endif
+// Automatically generated header based on the asm_defines.def file.
+#include "asm_defines.h"
#endif // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/asm_support_check.h b/runtime/asm_support_check.h
deleted file mode 100644
index 3163506..0000000
--- a/runtime/asm_support_check.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ASM_SUPPORT_CHECK_H_
-#define ART_RUNTIME_ASM_SUPPORT_CHECK_H_
-
-#include "art_method.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "gc/accounting/card_table.h"
-#include "gc/allocator/rosalloc.h"
-#include "gc/heap.h"
-#include "jit/jit.h"
-#include "lock_word.h"
-#include "mirror/class.h"
-#include "mirror/dex_cache.h"
-#include "mirror/string.h"
-#include "runtime.h"
-#include "stack.h"
-#include "thread.h"
-#include "utils/dex_cache_arrays_layout.h"
-
-#ifndef ADD_TEST_EQ
-#define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
-#endif
-
-#ifndef ASM_SUPPORT_CHECK_RETURN_TYPE
-#define ASM_SUPPORT_CHECK_RETURN_TYPE void
-#endif
-
-// Prepare for re-include of asm_support.h.
-#ifdef ART_RUNTIME_ASM_SUPPORT_H_
-#undef ART_RUNTIME_ASM_SUPPORT_H_
-#endif
-
-namespace art {
-
-static inline ASM_SUPPORT_CHECK_RETURN_TYPE CheckAsmSupportOffsetsAndSizes() {
-#ifdef ASM_SUPPORT_CHECK_HEADER
- ASM_SUPPORT_CHECK_HEADER
-#endif
-
-#include "asm_support.h"
-
-#ifdef ASM_SUPPORT_CHECK_FOOTER
- ASM_SUPPORT_CHECK_FOOTER
-#endif
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_ASM_SUPPORT_CHECK_H_
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 88075ba..5ec24bc 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -32,7 +32,7 @@
count1_(count1),
count2_(count2) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
LOG(INFO) << "Before barrier" << *self;
++*count1_;
barrier_->Wait(self);
@@ -40,7 +40,7 @@
LOG(INFO) << "After barrier" << *self;
}
- virtual void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -91,7 +91,7 @@
count_(count),
subtasks_(subtasks) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
for (size_t i = 0; i < subtasks_; ++i) {
++*count_;
// Pass through to next subtask.
@@ -99,7 +99,7 @@
}
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
private:
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 28b2912..c11e3d1 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -227,18 +227,15 @@
// No mutexes have been created yet during at startup.
return;
}
- typedef std::set<BaseMutex*>::const_iterator It;
os << "(Contended)\n";
- for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
- BaseMutex* mutex = *it;
+ for (const BaseMutex* mutex : *all_mutexes) {
if (mutex->HasEverContended()) {
mutex->Dump(os);
os << "\n";
}
}
os << "(Never contented)\n";
- for (It it = all_mutexes->begin(); it != all_mutexes->end(); ++it) {
- BaseMutex* mutex = *it;
+ for (const BaseMutex* mutex : *all_mutexes) {
if (!mutex->HasEverContended()) {
mutex->Dump(os);
os << "\n";
@@ -1031,7 +1028,11 @@
guard_.recursion_count_ = 0;
timespec ts;
InitTimeSpec(true, clock, ms, ns, &ts);
- int rc = TEMP_FAILURE_RETRY(pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts));
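+ // Note: TEMP_FAILURE_RETRY is not appropriate here because pthread_cond_timedwait
+ // reports failure through its return value (an errno-style code) rather than by
+ // returning -1 and setting errno, so retry explicitly on an EINTR return instead.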
+ int rc;
+ while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) {
+ continue;
+ }
+
if (rc == ETIMEDOUT) {
timed_out = true;
} else if (rc != 0) {
diff --git a/runtime/cha.cc b/runtime/cha.cc
index 3ea920d..d8cb525 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -507,7 +507,8 @@
return;
}
DCHECK(!single_impl->IsAbstract());
- if (single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) {
+ if ((single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) &&
+ !implementation_method->IsDefaultConflicting()) {
// Same implementation. Since implementation_method may be a copy of a default
// method, we need to check the declaring class for equality.
return;
@@ -543,7 +544,10 @@
method->SetHasSingleImplementation(true);
DCHECK(method->GetSingleImplementation(pointer_size) == nullptr);
}
- } else {
+ // Default conflicting methods cannot be treated as having a single implementation,
+ // as we need to call them (and not inline them) in case of ICCE.
+ // See class_linker.cc:EnsureThrowsInvocationError.
+ } else if (!method->IsDefaultConflicting()) {
method->SetHasSingleImplementation(true);
// Single implementation of non-abstract method is itself.
DCHECK_EQ(method->GetSingleImplementation(pointer_size), method);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 65f05d9..7549c04 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -79,7 +79,7 @@
#include "image-inl.h"
#include "imt_conflict_table.h"
#include "imtable-inl.h"
-#include "intern_table.h"
+#include "intern_table-inl.h"
#include "interpreter/interpreter.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
@@ -104,6 +104,8 @@
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/object_reference.h"
+#include "mirror/object_reference-inl.h"
#include "mirror/proxy.h"
#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
@@ -307,7 +309,7 @@
return lhs.size < rhs.size || (lhs.size == rhs.size && lhs.start_offset > rhs.start_offset);
}
};
-typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
+using FieldGaps = std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator>;
// Adds largest aligned gaps to queue of gaps.
static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
@@ -431,6 +433,8 @@
heap->IncrementDisableMovingGC(self);
StackHandleScope<64> hs(self); // 64 is picked arbitrarily.
auto class_class_size = mirror::Class::ClassClassSize(image_pointer_size_);
+ // Allocate the object as non-movable so that there are no cases where Object::IsClass returns
+ // the incorrect result when comparing to-space vs from-space.
Handle<mirror::Class> java_lang_Class(hs.NewHandle(ObjPtr<mirror::Class>::DownCast(MakeObjPtr(
heap->AllocNonMovableObject<true>(self, nullptr, class_class_size, VoidFunctor())))));
CHECK(java_lang_Class != nullptr);
@@ -483,9 +487,17 @@
mirror::ObjectArray<mirror::Object>::ClassSize(image_pointer_size_))));
object_array_class->SetComponentType(java_lang_Object.Get());
- // Setup String.
+ // Setup java.lang.String.
+ //
+ // We make this class non-movable to guard against the unlikely case where it would be
+ // moved by a sticky-bit (minor) collection when using the Generational
+ // Concurrent Copying (CC) collector, potentially creating a stale reference
+ // in the `klass_` field of one of its instances allocated in the Large-Object
+ // Space (LOS) -- see the comment about the dirty card scanning logic in
+ // art::gc::collector::ConcurrentCopying::MarkingPhase.
Handle<mirror::Class> java_lang_String(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
+ AllocClass</* kMovable */ false>(
+ self, java_lang_Class.Get(), mirror::String::ClassSize(image_pointer_size_))));
java_lang_String->SetStringClass();
mirror::Class::SetStatus(java_lang_String, ClassStatus::kResolved, self);
@@ -528,13 +540,13 @@
// Create int array type for native pointer arrays (for example vtables) on 32-bit archs.
Handle<mirror::Class> int_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_))));
+ AllocPrimitiveArrayClass(self, java_lang_Class.Get())));
int_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveInt, this));
SetClassRoot(ClassRoot::kIntArrayClass, int_array_class.Get());
// Create long array type for native pointer arrays (for example vtables) on 64-bit archs.
Handle<mirror::Class> long_array_class(hs.NewHandle(
- AllocClass(self, java_lang_Class.Get(), mirror::Array::ClassSize(image_pointer_size_))));
+ AllocPrimitiveArrayClass(self, java_lang_Class.Get())));
long_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveLong, this));
SetClassRoot(ClassRoot::kLongArrayClass, long_array_class.Get());
@@ -610,20 +622,29 @@
CHECK_EQ(dalvik_system_ClassExt->GetObjectSize(), mirror::ClassExt::InstanceSize());
// Setup the primitive array type classes - can't be done until Object has a vtable.
- SetClassRoot(ClassRoot::kBooleanArrayClass, FindSystemClass(self, "[Z"));
+ AllocAndSetPrimitiveArrayClassRoot(self,
+ java_lang_Class.Get(),
+ ClassRoot::kBooleanArrayClass,
+ ClassRoot::kPrimitiveBoolean,
+ "[Z");
- SetClassRoot(ClassRoot::kByteArrayClass, FindSystemClass(self, "[B"));
+ AllocAndSetPrimitiveArrayClassRoot(
+ self, java_lang_Class.Get(), ClassRoot::kByteArrayClass, ClassRoot::kPrimitiveByte, "[B");
- SetClassRoot(ClassRoot::kCharArrayClass, FindSystemClass(self, "[C"));
+ AllocAndSetPrimitiveArrayClassRoot(
+ self, java_lang_Class.Get(), ClassRoot::kCharArrayClass, ClassRoot::kPrimitiveChar, "[C");
- SetClassRoot(ClassRoot::kShortArrayClass, FindSystemClass(self, "[S"));
+ AllocAndSetPrimitiveArrayClassRoot(
+ self, java_lang_Class.Get(), ClassRoot::kShortArrayClass, ClassRoot::kPrimitiveShort, "[S");
CheckSystemClass(self, int_array_class, "[I");
CheckSystemClass(self, long_array_class, "[J");
- SetClassRoot(ClassRoot::kFloatArrayClass, FindSystemClass(self, "[F"));
+ AllocAndSetPrimitiveArrayClassRoot(
+ self, java_lang_Class.Get(), ClassRoot::kFloatArrayClass, ClassRoot::kPrimitiveFloat, "[F");
- SetClassRoot(ClassRoot::kDoubleArrayClass, FindSystemClass(self, "[D"));
+ AllocAndSetPrimitiveArrayClassRoot(
+ self, java_lang_Class.Get(), ClassRoot::kDoubleArrayClass, ClassRoot::kPrimitiveDouble, "[D");
// Run Class through FindSystemClass. This initializes the dex_cache_ fields and register it
// in class_table_.
@@ -940,7 +961,6 @@
runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
DCHECK(!oat_files.empty());
const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
- CHECK_EQ(default_oat_header.GetImageFileLocationOatDataBegin(), 0U);
const char* image_file_location = oat_files[0]->GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
CHECK(image_file_location == nullptr || *image_file_location == 0);
@@ -1141,7 +1161,7 @@
VerifyDeclaringClassVisitor() REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: live_bitmap_(Runtime::Current()->GetHeap()->GetLiveBitmap()) {}
- virtual void Visit(ArtMethod* method)
+ void Visit(ArtMethod* method) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked();
if (klass != nullptr) {
@@ -1153,18 +1173,31 @@
gc::accounting::HeapBitmap* const live_bitmap_;
};
-class FixupInternVisitor {
+/*
+ * A class used to ensure that all strings in an AppImage have been properly
+ * interned.
+ */
+class VerifyStringInterningVisitor {
public:
- ALWAYS_INLINE ObjPtr<mirror::Object> TryInsertIntern(mirror::Object* obj) const
+ explicit VerifyStringInterningVisitor(const gc::space::ImageSpace& space) :
+ uninterned_string_found_(false),
+ space_(space),
+ intern_table_(*Runtime::Current()->GetInternTable()) {}
+
+ ALWAYS_INLINE
+ void TestObject(ObjPtr<mirror::Object> referred_obj) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj != nullptr && obj->IsString()) {
- const auto intern = Runtime::Current()->GetInternTable()->InternStrong(obj->AsString());
- return intern;
+ if (referred_obj != nullptr &&
+ space_.HasAddress(referred_obj.Ptr()) &&
+ referred_obj->IsString()) {
+ ObjPtr<mirror::String> referred_str = referred_obj->AsString();
+ uninterned_string_found_ = uninterned_string_found_ ||
+ (intern_table_.LookupStrong(Thread::Current(), referred_str) != referred_str);
}
- return obj;
}
- ALWAYS_INLINE void VisitRootIfNonNull(
+ ALWAYS_INLINE
+ void VisitRootIfNonNull(
mirror::CompressedReference<mirror::Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
@@ -1172,48 +1205,81 @@
}
}
- ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- root->Assign(TryInsertIntern(root->AsMirrorPtr()));
+ TestObject(root->AsMirrorPtr());
}
// Visit Class Fields
- ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
- MemberOffset offset,
- bool is_static ATTRIBUTE_UNUSED) const
+ ALWAYS_INLINE
+ void operator()(ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
// There could be overlap between ranges, we must avoid visiting the same reference twice.
// Avoid the class field since we already fixed it up in FixupClassVisitor.
if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
// Updating images, don't do a read barrier.
- // Only string fields are fixed, don't do a verify.
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
- offset);
- obj->SetFieldObject<false, false>(offset, TryInsertIntern(ref));
+ ObjPtr<mirror::Object> referred_obj =
+ obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+
+ TestObject(referred_obj);
}
}
void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
ObjPtr<mirror::Reference> ref) const
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
- this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+ operator()(ref, mirror::Reference::ReferentOffset(), false);
}
- void operator()(mirror::Object* obj) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj->IsDexCache()) {
- obj->VisitReferences<true, kVerifyNone, kWithoutReadBarrier>(*this, *this);
- } else {
- // Don't visit native roots for non-dex-cache
- obj->VisitReferences<false, kVerifyNone, kWithoutReadBarrier>(*this, *this);
- }
- }
+ mutable bool uninterned_string_found_;
+ const gc::space::ImageSpace& space_;
+ InternTable& intern_table_;
};
+/*
+ * This function verifies that string references in the AppImage have been
+ * properly interned. To be considered properly interned, a reference must
+ * point to the same string instance that the intern table holds.
+ */
+bool VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::mutator_lock_) {
+ const gc::accounting::ContinuousSpaceBitmap* bitmap = space.GetMarkBitmap();
+ const ImageHeader& image_header = space.GetImageHeader();
+ const uint8_t* target_base = space.GetMemMap()->Begin();
+ const ImageSection& objects_section = image_header.GetObjectsSection();
+ uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+ uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
+
+ VerifyStringInterningVisitor visitor(space);
+ bitmap->VisitMarkedRange(objects_begin,
+ objects_end,
+ [&space, &visitor](mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (space.HasAddress(obj)) {
+ if (obj->IsDexCache()) {
+ obj->VisitReferences</* kVisitNativeRoots */ true,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+ } else {
+ // Don't visit native roots for non-dex-cache as they can't contain
+ // native references to strings. This is verified during compilation
+ // by ImageWriter::VerifyNativeGCRootInvariants.
+ obj->VisitReferences</* kVisitNativeRoots */ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(visitor, visitor);
+ }
+ }
+ });
+
+ return !visitor.uninterned_string_found_;
+}
+
// new_class_set is the set of classes that were read from the class table section in the image.
// If there was no class table section, it is null.
// Note: using a class here to avoid having to make ClassLinker internals public.
-class AppImageClassLoadersAndDexCachesHelper {
+class AppImageLoadingHelper {
public:
static void Update(
ClassLinker* class_linker,
@@ -1223,9 +1289,17 @@
ClassTable::ClassSet* new_class_set)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static void AddImageInternTable(gc::space::ImageSpace* space)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ static void UpdateInternStrings(
+ gc::space::ImageSpace* space,
+ const SafeMap<mirror::String*, mirror::String*>& intern_remap)
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
-void AppImageClassLoadersAndDexCachesHelper::Update(
+void AppImageLoadingHelper::Update(
ClassLinker* class_linker,
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1233,6 +1307,8 @@
ClassTable::ClassSet* new_class_set)
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedTrace app_image_timing("AppImage:Updating");
+
Thread* const self = Thread::Current();
gc::Heap* const heap = Runtime::Current()->GetHeap();
const ImageHeader& header = space->GetImageHeader();
@@ -1248,6 +1324,7 @@
CHECK(!class_linker->FindDexCacheDataLocked(*dex_file).IsValid());
class_linker->RegisterDexFileLocked(*dex_file, dex_cache, class_loader.Get());
}
+
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
@@ -1255,17 +1332,20 @@
for (size_t j = 0; j != num_types; ++j) {
// The image space is not yet added to the heap, avoid read barriers.
ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
+
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
auto it = new_class_set->find(ClassTable::TableSlot(klass));
DCHECK(it != new_class_set->end());
DCHECK_EQ(it->Read(), klass);
ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
+
if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
auto it2 = new_class_set->find(ClassTable::TableSlot(super_class));
DCHECK(it2 != new_class_set->end());
DCHECK_EQ(it2->Read(), super_class);
}
+
for (ArtMethod& m : klass->GetDirectMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? class_linker->GetQuickOatCodeFor(&m) : code;
@@ -1276,6 +1356,7 @@
DCHECK_EQ(code, oat_code) << m.PrettyMethod();
}
}
+
for (ArtMethod& m : klass->GetVirtualMethods(kRuntimePointerSize)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
const void* oat_code = m.IsInvokable() ? class_linker->GetQuickOatCodeFor(&m) : code;
@@ -1291,28 +1372,101 @@
}
}
}
+
if (ClassLinker::kAppImageMayContainStrings) {
- // Fixup all the literal strings happens at app images which are supposed to be interned.
- ScopedTrace timing("Fixup String Intern in image and dex_cache");
- const auto& image_header = space->GetImageHeader();
- const auto bitmap = space->GetMarkBitmap(); // bitmap of objects
- const uint8_t* target_base = space->GetMemMap()->Begin();
- const ImageSection& objects_section = image_header.GetObjectsSection();
-
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-
- FixupInternVisitor fixup_intern_visitor;
- bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_intern_visitor);
+ AddImageInternTable(space);
+ DCHECK(VerifyStringInterning(*space));
}
+
if (kVerifyArtMethodDeclaringClasses) {
- ScopedTrace timing("Verify declaring classes");
+ ScopedTrace timing("AppImage:VerifyDeclaringClasses");
ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
VerifyDeclaringClassVisitor visitor;
header.VisitPackedArtMethods(&visitor, space->Begin(), kRuntimePointerSize);
}
}
+void AppImageLoadingHelper::UpdateInternStrings(
+ gc::space::ImageSpace* space,
+ const SafeMap<mirror::String*, mirror::String*>& intern_remap) {
+ const uint8_t* target_base = space->Begin();
+ const ImageSection& sro_section = space->GetImageHeader().GetImageStringReferenceOffsetsSection();
+ const size_t num_string_offsets = sro_section.Size() / sizeof(uint32_t);
+
+ VLOG(image)
+ << "ClassLinker:AppImage:InternStrings:imageStringReferenceOffsetCount = "
+ << num_string_offsets;
+
+ const uint32_t* sro_base =
+ reinterpret_cast<const uint32_t*>(target_base + sro_section.Offset());
+
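+ // Each entry below is a 32-bit offset from the start of the image. When the
+ // native-ref tag bit is set, the slot is read as a mirror::CompressedReference
+ // (presumably a string GC root held in native data); otherwise it is read as a
+ // plain mirror::HeapReference field of a managed object.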
+ for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
+ if (HasNativeRefTag(sro_base[offset_index])) {
+ void* raw_field_addr = space->Begin() + ClearNativeRefTag(sro_base[offset_index]);
+ mirror::CompressedReference<mirror::Object>* objref_addr =
+ reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(raw_field_addr);
+ mirror::String* referred_string = objref_addr->AsMirrorPtr()->AsString();
+ DCHECK(referred_string != nullptr);
+
+ auto it = intern_remap.find(referred_string);
+ if (it != intern_remap.end()) {
+ objref_addr->Assign(it->second);
+ }
+ } else {
+ void* raw_field_addr = space->Begin() + sro_base[offset_index];
+ mirror::HeapReference<mirror::Object>* objref_addr =
+ reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_field_addr);
+ mirror::String* referred_string = objref_addr->AsMirrorPtr()->AsString();
+ DCHECK(referred_string != nullptr);
+
+ auto it = intern_remap.find(referred_string);
+ if (it != intern_remap.end()) {
+ objref_addr->Assign<false>(it->second);
+ }
+ }
+ }
+}
+
+void AppImageLoadingHelper::AddImageInternTable(gc::space::ImageSpace* space) {
+ // Iterate over the string reference offsets stored in the image and intern
+ // the strings they point to.
+ ScopedTrace timing("AppImage:InternString");
+
+ Thread* const self = Thread::Current();
+ Runtime* const runtime = Runtime::Current();
+ InternTable* const intern_table = runtime->GetInternTable();
+
+ // Add the image's interned strings to the runtime intern table, removing any conflicts.
+ // For each conflict, map the image string to the existing runtime string so that
+ // references can be remapped quickly.
+ // TODO: Optimize with a bitmap or bloom filter
+ SafeMap<mirror::String*, mirror::String*> intern_remap;
+ intern_table->AddImageStringsToTable(space, [&](InternTable::UnorderedSet& interns)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ VLOG(image) << "AppImage:StringsInInternTable = " << interns.size();
+ for (auto it = interns.begin(); it != interns.end(); ) {
+ ObjPtr<mirror::String> string = it->Read();
+ ObjPtr<mirror::String> existing = intern_table->LookupWeak(self, string);
+ if (existing == nullptr) {
+ existing = intern_table->LookupStrong(self, string);
+ }
+ if (existing != nullptr) {
+ intern_remap.Put(string.Ptr(), existing.Ptr());
+ it = interns.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ });
+
+ VLOG(image) << "AppImage:ConflictingInternStrings = " << intern_remap.size();
+
+ // For debug builds, always run the code below to get coverage.
+ if (kIsDebugBuild || !intern_remap.empty()) {
+ // Slow path case is when there are conflicting intern strings to fix up.
+ UpdateInternStrings(space, intern_remap);
+ }
+}
+
// Update the class loader. Should only be used on classes in the image space.
class UpdateClassLoaderVisitor {
public:
@@ -1541,7 +1695,7 @@
public:
explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
- virtual void Visit(ArtMethod* method)
+ void Visit(ArtMethod* method) override
REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
@@ -1800,11 +1954,7 @@
VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2);
}
if (app_image) {
- AppImageClassLoadersAndDexCachesHelper::Update(this,
- space,
- class_loader,
- dex_caches,
- &temp_set);
+ AppImageLoadingHelper::Update(this, space, class_loader, dex_caches, &temp_set);
// Update class loader and resolved strings. If added_class_table is false, the resolved
// strings were forwarded UpdateAppImageClassLoadersAndDexCaches.
UpdateClassLoaderVisitor visitor(space, class_loader.Get());
@@ -1824,7 +1974,7 @@
// Force every app image class's SubtypeCheck to be at least kIninitialized.
//
// See also ImageWriter::FixupClass.
- ScopedTrace trace("Recalculate app image SubtypeCheck bitstrings");
+ ScopedTrace trace("AppImage:RecalculateSubtypeCheckBitstrings");
MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
for (const ClassTable::TableSlot& root : temp_set) {
SubtypeCheck<ObjPtr<mirror::Class>>::EnsureInitialized(root.Read());
@@ -1844,7 +1994,7 @@
if (kIsDebugBuild && app_image) {
// This verification needs to happen after the classes have been added to the class loader.
// Since it ensures classes are in the class table.
- ScopedTrace trace("VerifyAppImage");
+ ScopedTrace trace("AppImage:Verify");
VerifyAppImage(header, class_loader, dex_caches, class_table, space);
}
@@ -2166,13 +2316,14 @@
return dex_cache;
}
+template <bool kMovable>
ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size) {
DCHECK_GE(class_size, sizeof(mirror::Class));
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::Class::InitializeClassVisitor visitor(class_size);
- ObjPtr<mirror::Object> k = kMovingClasses ?
+ ObjPtr<mirror::Object> k = (kMovingClasses && kMovable) ?
heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
if (UNLIKELY(k == nullptr)) {
@@ -2186,6 +2337,18 @@
return AllocClass(self, GetClassRoot<mirror::Class>(this), class_size);
}
+ObjPtr<mirror::Class> ClassLinker::AllocPrimitiveArrayClass(Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class) {
+ // We make this class non-movable to guard against the unlikely case where it would be
+ // moved by a sticky-bit (minor) collection when using the Generational
+ // Concurrent Copying (CC) collector, potentially creating a stale reference
+ // in the `klass_` field of one of its instances allocated in the Large-Object
+ // Space (LOS) -- see the comment about the dirty card scanning logic in
+ // art::gc::collector::ConcurrentCopying::MarkingPhase.
+ return AllocClass</* kMovable */ false>(
+ self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
+}
+
ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> ClassLinker::AllocStackTraceElementArray(
Thread* self,
size_t length) {
@@ -2270,7 +2433,7 @@
return klass;
}
-typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry;
+using ClassPathEntry = std::pair<const DexFile*, const DexFile::ClassDef*>;
// Search a collection of DexFiles for a descriptor
ClassPathEntry FindInClassPath(const char* descriptor,
@@ -2468,7 +2631,8 @@
// the Java-side could still succeed for racy programs if another thread is actively
// modifying the class loader's path list.
- if (!self->CanCallIntoJava()) {
+ // The runtime is not allowed to call into Java from a runtime thread, so just abort.
+ if (self->IsRuntimeThread()) {
// Oops, we can't call into java so we can't run actual class-loader code.
// This is true for e.g. for the compiler (jit or aot).
ObjPtr<mirror::Throwable> pre_allocated =
@@ -2601,6 +2765,17 @@
}
}
+ // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+ // code to be executed. We put it up here so we can avoid all the allocations associated with
+ // creating the class. This can happen with (eg) jit threads.
+ if (!self->CanLoadClasses()) {
+ // Make sure we don't try to load anything, potentially causing an infinite loop.
+ ObjPtr<mirror::Throwable> pre_allocated =
+ Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ self->SetException(pre_allocated);
+ return nullptr;
+ }
+
if (klass == nullptr) {
// Allocate a class with the status of not ready.
// Interface object should get the right size here. Regular class will
@@ -3589,6 +3764,18 @@
// Identify the underlying component type
CHECK_EQ('[', descriptor[0]);
StackHandleScope<2> hs(self);
+
+ // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+ // code to be executed. We put it up here so we can avoid all the allocations associated with
+ // creating the class. This can happen with (eg) jit threads.
+ if (!self->CanLoadClasses()) {
+ // Make sure we don't try to load anything, potentially causing an infinite loop.
+ ObjPtr<mirror::Throwable> pre_allocated =
+ Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ self->SetException(pre_allocated);
+ return nullptr;
+ }
+
MutableHandle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1,
class_loader)));
if (component_type == nullptr) {
@@ -3649,10 +3836,22 @@
new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::Object>>(this));
} else if (strcmp(descriptor, "[Ljava/lang/String;") == 0) {
new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::String>>(this));
+ } else if (strcmp(descriptor, "[Z") == 0) {
+ new_class.Assign(GetClassRoot<mirror::BooleanArray>(this));
+ } else if (strcmp(descriptor, "[B") == 0) {
+ new_class.Assign(GetClassRoot<mirror::ByteArray>(this));
+ } else if (strcmp(descriptor, "[C") == 0) {
+ new_class.Assign(GetClassRoot<mirror::CharArray>(this));
+ } else if (strcmp(descriptor, "[S") == 0) {
+ new_class.Assign(GetClassRoot<mirror::ShortArray>(this));
} else if (strcmp(descriptor, "[I") == 0) {
new_class.Assign(GetClassRoot<mirror::IntArray>(this));
} else if (strcmp(descriptor, "[J") == 0) {
new_class.Assign(GetClassRoot<mirror::LongArray>(this));
+ } else if (strcmp(descriptor, "[F") == 0) {
+ new_class.Assign(GetClassRoot<mirror::FloatArray>(this));
+ } else if (strcmp(descriptor, "[D") == 0) {
+ new_class.Assign(GetClassRoot<mirror::DoubleArray>(this));
}
}
if (new_class == nullptr) {
@@ -3764,6 +3963,7 @@
ObjPtr<mirror::Class> ClassLinker::InsertClass(const char* descriptor,
ObjPtr<mirror::Class> klass,
size_t hash) {
+ DCHECK(Thread::Current()->CanLoadClasses());
if (VLOG_IS_ON(class_linker)) {
ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
std::string source;
@@ -4169,6 +4369,7 @@
runtime->GetCompilerCallbacks(),
runtime->IsAotCompiler(),
log_level,
+ Runtime::Current()->GetTargetSdkVersion(),
error_msg);
}
@@ -4287,6 +4488,18 @@
jobjectArray methods,
jobjectArray throws) {
Thread* self = soa.Self();
+
+ // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
+ // code to be executed. We put it up here so we can avoid all the allocations associated with
+ // creating the class. This can happen with (eg) jit-threads.
+ if (!self->CanLoadClasses()) {
+ // Make sure we don't try to load anything, potentially causing an infinite loop.
+ ObjPtr<mirror::Throwable> pre_allocated =
+ Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
+ self->SetException(pre_allocated);
+ return nullptr;
+ }
+
StackHandleScope<10> hs(self);
MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
AllocClass(self, GetClassRoot<mirror::Class>(this), sizeof(mirror::Class))));
@@ -6404,7 +6617,7 @@
// iftable must be large enough to hold all interfaces without changing its size.
static size_t FillIfTable(ObjPtr<mirror::IfTable> iftable,
size_t super_ifcount,
- std::vector<ObjPtr<mirror::Class>> to_process)
+ const std::vector<ObjPtr<mirror::Class>>& to_process)
REQUIRES(Roles::uninterruptible_)
REQUIRES_SHARED(Locks::mutator_lock_) {
// This is the set of all class's already in the iftable. Used to make checking if a class has
@@ -7039,9 +7252,12 @@
// mark this as a default, non-abstract method, since thats what it is. Also clear the
// kAccSkipAccessChecks bit since this class hasn't been verified yet it shouldn't have
// methods that are skipping access checks.
+ // Also clear potential kAccSingleImplementation to avoid CHA trying to inline
+ // the default method.
DCHECK_EQ(new_method.GetAccessFlags() & kAccNative, 0u);
constexpr uint32_t kSetFlags = kAccDefault | kAccDefaultConflict | kAccCopied;
- constexpr uint32_t kMaskFlags = ~(kAccAbstract | kAccSkipAccessChecks);
+ constexpr uint32_t kMaskFlags =
+ ~(kAccAbstract | kAccSkipAccessChecks | kAccSingleImplementation);
new_method.SetAccessFlags((new_method.GetAccessFlags() | kSetFlags) & kMaskFlags);
DCHECK(new_method.IsDefaultConflicting());
// The actual method might or might not be marked abstract since we just copied it from a
@@ -8427,7 +8643,7 @@
target_method->GetShorty(&shorty_length);
int32_t num_params = static_cast<int32_t>(shorty_length + receiver_count - 1);
- StackHandleScope<7> hs(self);
+ StackHandleScope<5> hs(self);
ObjPtr<mirror::Class> array_of_class = GetClassRoot<mirror::ObjectArray<mirror::Class>>(this);
Handle<mirror::ObjectArray<mirror::Class>> method_params(hs.NewHandle(
mirror::ObjectArray<mirror::Class>::Alloc(self, array_of_class, num_params)));
@@ -8436,20 +8652,25 @@
return nullptr;
}
+ const DexFile* dex_file = referrer->GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method_handle.field_or_method_idx_);
int32_t index = 0;
if (receiver_count != 0) {
- // Insert receiver
- method_params->Set(index++, target_method->GetDeclaringClass());
+ // Insert receiver. Use the class identified in the method handle rather than the declaring
+ // class of the resolved method, which may be a super class or a default interface method
+ // (b/115964401).
+ ObjPtr<mirror::Class> receiver_class = LookupResolvedType(method_id.class_idx_, referrer);
+ // receiver_class should have been resolved when resolving the target method.
+ DCHECK(receiver_class != nullptr);
+ method_params->Set(index++, receiver_class);
}
- DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
- Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
- Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
+
+ const DexFile::ProtoId& proto_id = dex_file->GetProtoId(method_id.proto_idx_);
+ DexFileParameterIterator it(*dex_file, proto_id);
while (it.HasNext()) {
DCHECK_LT(index, num_params);
const dex::TypeIndex type_idx = it.GetTypeIdx();
- ObjPtr<mirror::Class> klass = ResolveType(type_idx,
- target_method_dex_cache,
- target_method_class_loader);
+ ObjPtr<mirror::Class> klass = ResolveType(type_idx, referrer);
if (nullptr == klass) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8458,7 +8679,8 @@
it.Next();
}
- Handle<mirror::Class> return_type = hs.NewHandle(target_method->ResolveReturnType());
+ Handle<mirror::Class> return_type =
+ hs.NewHandle(ResolveType(proto_id.return_type_idx_, referrer));
if (UNLIKELY(return_type.IsNull())) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8548,6 +8770,49 @@
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
os << "Zygote loaded classes=" << NumZygoteClasses() << " post zygote classes="
<< NumNonZygoteClasses() << "\n";
+ ReaderMutexLock mu2(soa.Self(), *Locks::dex_lock_);
+ os << "Dumping registered class loaders\n";
+ size_t class_loader_index = 0;
+ for (const ClassLoaderData& class_loader : class_loaders_) {
+ ObjPtr<mirror::ClassLoader> loader =
+ ObjPtr<mirror::ClassLoader>::DownCast(soa.Self()->DecodeJObject(class_loader.weak_root));
+ if (loader != nullptr) {
+ os << "#" << class_loader_index++ << " " << loader->GetClass()->PrettyDescriptor() << ": [";
+ bool saw_one_dex_file = false;
+ for (const DexCacheData& dex_cache : dex_caches_) {
+ if (dex_cache.IsValid() && dex_cache.class_table == class_loader.class_table) {
+ if (saw_one_dex_file) {
+ os << ":";
+ }
+ saw_one_dex_file = true;
+ os << dex_cache.dex_file->GetLocation();
+ }
+ }
+ os << "]";
+ bool found_parent = false;
+ if (loader->GetParent() != nullptr) {
+ size_t parent_index = 0;
+ for (const ClassLoaderData& class_loader2 : class_loaders_) {
+ ObjPtr<mirror::ClassLoader> loader2 = ObjPtr<mirror::ClassLoader>::DownCast(
+ soa.Self()->DecodeJObject(class_loader2.weak_root));
+ if (loader2 == loader->GetParent()) {
+ os << ", parent #" << parent_index;
+ found_parent = true;
+ break;
+ }
+ parent_index++;
+ }
+ if (!found_parent) {
+ os << ", unregistered parent of type "
+ << loader->GetParent()->GetClass()->PrettyDescriptor();
+ }
+ } else {
+ os << ", no parent";
+ }
+ os << "\n";
+ }
+ }
+ os << "Done dumping class loaders\n";
}
class CountClassesVisitor : public ClassLoaderVisitor {
@@ -8607,6 +8872,19 @@
class_roots->Set<false>(index, klass);
}
+void ClassLinker::AllocAndSetPrimitiveArrayClassRoot(Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class,
+ ClassRoot primitive_array_class_root,
+ ClassRoot primitive_class_root,
+ const char* descriptor) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> primitive_array_class(hs.NewHandle(
+ AllocPrimitiveArrayClass(self, java_lang_Class)));
+ primitive_array_class->SetComponentType(GetClassRoot(primitive_class_root, this));
+ SetClassRoot(primitive_array_class_root, primitive_array_class.Get());
+ CheckSystemClass(self, primitive_array_class, descriptor);
+}
+
jobject ClassLinker::CreateWellKnownClassLoader(Thread* self,
const std::vector<const DexFile*>& dex_files,
jclass loader_class,
@@ -8657,7 +8935,7 @@
self,
kDexFileIndexStart + 1));
DCHECK(h_long_array != nullptr);
- h_long_array->Set(kDexFileIndexStart, reinterpret_cast<intptr_t>(dex_file));
+ h_long_array->Set(kDexFileIndexStart, reinterpret_cast64<int64_t>(dex_file));
// Note that this creates a finalizable dalvik.system.DexFile object and a corresponding
// FinalizerReference which will never get cleaned up without a started runtime.
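The switch from reinterpret_cast<intptr_t> to reinterpret_cast64<int64_t> above (with the matching read in class_loader_context.cc further down) stores a native DexFile pointer in the Java long[] cookie through an explicitly 64-bit-wide conversion. A rough, self-contained sketch of that round trip; the two helpers below are stand-ins for illustration only, not ART's reinterpret_cast64 from base/casts.h:

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for a pointer-to-jlong cast: widen through uintptr_t so the
// conversion is well-defined on both 32-bit and 64-bit targets.
template <typename Dest, typename Source>
Dest WidenPointer(Source* source) {
  return static_cast<Dest>(reinterpret_cast<uintptr_t>(source));
}

// Stand-in for the reverse jlong-to-pointer cast.
template <typename Dest, typename Source>
Dest* NarrowToPointer(Source value) {
  return reinterpret_cast<Dest*>(static_cast<uintptr_t>(value));
}

struct DexFileStub { int num_class_defs; };  // Hypothetical payload.

int main() {
  DexFileStub dex_file{42};
  // Index 0 is reserved (the oat file in ART); dex files start at index 1.
  std::vector<int64_t> long_array(2, 0);
  long_array[1] = WidenPointer<int64_t>(&dex_file);

  // Reading the pointer back, as the class loader context walk does.
  DexFileStub* cp_dex_file = NarrowToPointer<DexFileStub>(long_array[1]);
  assert(cp_dex_file == &dex_file);
  return 0;
}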
@@ -8941,7 +9219,7 @@
ifcount * mirror::IfTable::kMax)));
}
-// Instantiate ResolveMethod.
+// Instantiate ClassLinker::ResolveMethod.
template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
@@ -8955,4 +9233,14 @@
ArtMethod* referrer,
InvokeType type);
+// Instantiate ClassLinker::AllocClass.
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ true>(
+ Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class,
+ uint32_t class_size);
+template ObjPtr<mirror::Class> ClassLinker::AllocClass</* kMovable */ false>(
+ Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class,
+ uint32_t class_size);
+
} // namespace art
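The explicit AllocClass instantiations above are needed because the new kMovable template parameter is defined in this .cc file but used from other translation units, so both variants must be emitted here. A minimal sketch of the same shape under assumed names (the allocator functions and the kMovingClasses switch below are illustrative, not ART's):

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical stand-ins for the movable and non-moving heaps.
std::string AllocInMovableSpace(uint32_t size) { return "movable:" + std::to_string(size); }
std::string AllocInNonMovingSpace(uint32_t size) { return "non-moving:" + std::to_string(size); }

constexpr bool kMovingClasses = true;  // Assumed global switch, analogous to ART's.

template <bool kMovable = true>
std::string AllocClass(uint32_t class_size) {
  // Allocate in the non-moving space if either the global switch or the
  // per-call template parameter forbids moving the class object.
  if (kMovingClasses && kMovable) {
    return AllocInMovableSpace(class_size);
  }
  return AllocInNonMovingSpace(class_size);
}

// Explicit instantiations, mirroring the pattern at the end of class_linker.cc.
template std::string AllocClass</* kMovable= */ true>(uint32_t class_size);
template std::string AllocClass</* kMovable= */ false>(uint32_t class_size);

int main() {
  std::cout << AllocClass<true>(64) << "\n" << AllocClass<false>(64) << "\n";
}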
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index e4d9c96..b6f1f86 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -111,6 +111,8 @@
class ClassLinker {
public:
+ // Disabled until AppImageLoadingHelper::UpdateInternStrings does the missing GC card marks.
+ // Bug: 117846779
static constexpr bool kAppImageMayContainStrings = false;
explicit ClassLinker(InternTable* intern_table);
@@ -314,7 +316,10 @@
REQUIRES_SHARED(Locks::mutator_lock_);
template <ResolveMode kResolveMode>
- ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
+ ALWAYS_INLINE ArtMethod* ResolveMethod(Thread* self,
+ uint32_t method_idx,
+ ArtMethod* referrer,
+ InvokeType type)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ArtMethod* ResolveMethodWithoutInvokeType(uint32_t method_idx,
@@ -775,7 +780,11 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- // For early bootstrapping by Init
+ // For early bootstrapping by Init.
+ // If we do not allow moving classes (`art::kMovingClass` is false) or if
+ // parameter `kMovable` is false (or both), the class object is allocated in
+ // the non-moving space.
+ template <bool kMovable = true>
ObjPtr<mirror::Class> AllocClass(Thread* self,
ObjPtr<mirror::Class> java_lang_Class,
uint32_t class_size)
@@ -789,6 +798,12 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
+ // Allocate a primitive array class.
+ ObjPtr<mirror::Class> AllocPrimitiveArrayClass(Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
+
ObjPtr<mirror::DexCache> AllocDexCache(/*out*/ ObjPtr<mirror::String>* out_location,
Thread* self,
const DexFile& dex_file)
@@ -1206,6 +1221,20 @@
void SetClassRoot(ClassRoot class_root, ObjPtr<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Allocate primitive array class for primitive with class root
+ // `primitive_class_root`, and associate it to class root
+ // `primitive_array_class_root`.
+ //
+  // Also check that the class returned when searching system classes for
+  // `descriptor` matches the allocated class.
+ void AllocAndSetPrimitiveArrayClassRoot(Thread* self,
+ ObjPtr<mirror::Class> java_lang_Class,
+ ClassRoot primitive_array_class_root,
+ ClassRoot primitive_class_root,
+ const char* descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Roles::uninterruptible_);
+
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
@@ -1328,7 +1357,7 @@
class FindVirtualMethodHolderVisitor;
- friend class AppImageClassLoadersAndDexCachesHelper;
+ friend class AppImageLoadingHelper;
friend class ImageDumper; // for DexLock
friend struct linker::CompilationHelper; // For Compile in ImageTest.
friend class linker::ImageWriter; // for GetClassRoots
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 2bd5411..5c8d685 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -16,7 +16,10 @@
#include "class_loader_context.h"
+#include <android-base/parseint.h>
+
#include "art_field-inl.h"
+#include "base/casts.h"
#include "base/dchecked_vector.h"
#include "base/stl_util.h"
#include "class_linker.h"
@@ -61,10 +64,10 @@
// make sure we do not de-allocate them.
for (ClassLoaderInfo& info : class_loader_chain_) {
for (std::unique_ptr<OatFile>& oat_file : info.opened_oat_files) {
- oat_file.release();
+ oat_file.release(); // NOLINT b/117926937
}
for (std::unique_ptr<const DexFile>& dex_file : info.opened_dex_files) {
- dex_file.release();
+ dex_file.release(); // NOLINT b/117926937
}
}
}
@@ -120,7 +123,7 @@
return false;
}
uint32_t checksum = 0;
- if (!ParseInt(dex_file_with_checksum[1].c_str(), &checksum)) {
+ if (!android::base::ParseUint(dex_file_with_checksum[1].c_str(), &checksum)) {
return false;
}
class_loader_chain_.back().classpath.push_back(dex_file_with_checksum[0]);
@@ -472,8 +475,8 @@
int32_t long_array_size = long_array->GetLength();
// Index 0 from the long array stores the oat file. The dex files start at index 1.
for (int32_t j = 1; j < long_array_size; ++j) {
- const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
- long_array->GetWithoutChecks(j)));
+ const DexFile* cp_dex_file =
+ reinterpret_cast64<const DexFile*>(long_array->GetWithoutChecks(j));
if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
// TODO(calin): It's unclear why the dex files with no classes are skipped here and when
// cp_dex_file can be null.
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index a233357..8d8e93a 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -57,12 +57,6 @@
return nullptr;
}
-// To take into account http://b/35845221
-#pragma clang diagnostic push
-#if __clang_major__ < 4
-#pragma clang diagnostic ignored "-Wunreachable-code"
-#endif
-
mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
WriterMutexLock mu(Thread::Current(), lock_);
// Should only be updating latest table.
@@ -88,8 +82,6 @@
return existing;
}
-#pragma clang diagnostic pop
-
size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
const ClassSet& set) const {
size_t count = 0;
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index c29043e..15ab5f0 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -27,6 +27,7 @@
#include "dex/primitive.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
+#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame.h"
#include "interpreter/unstarted_runtime.h"
#include "jvalue-inl.h"
@@ -172,6 +173,14 @@
if (UNLIKELY(self->IsExceptionPending())) {
return false;
}
+ if (shadow_frame.GetForcePopFrame()) {
+ // We need to check this here since we expect that the FieldWriteEvent happens before the
+ // actual field write. If one pops the stack we should not modify the field. The next
+ // instruction will force a pop. Return true.
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ DCHECK(interpreter::PrevFrameWillRetry(self, shadow_frame));
+ return true;
+ }
}
switch (field_type) {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index a5157df..1460562 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -191,6 +191,12 @@
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
+#define TEST_DISABLED() \
+ if ((true)) { \
+ printf("WARNING: TEST DISABLED\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_ARM() \
if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) { \
printf("WARNING: TEST DISABLED FOR ARM\n"); \
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 657a78b..7199d5e 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -771,13 +771,19 @@
// Avoid running Java code for exception initialization.
// TODO: Checks to make this a bit less brittle.
+ //
+ // Note: this lambda ensures that the destruction of the ScopedLocalRefs will run in the extended
+ // stack, which is important for modes with larger stack sizes (e.g., ASAN). Using a lambda
+ // instead of a block simplifies the control flow.
+ auto create_and_throw = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Allocate an uninitialized object.
+ ScopedLocalRef<jobject> exc(env,
+ env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+ if (exc == nullptr) {
+ LOG(WARNING) << "Could not allocate StackOverflowError object.";
+ return;
+ }
- std::string error_msg;
-
- // Allocate an uninitialized object.
- ScopedLocalRef<jobject> exc(env,
- env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
- if (exc.get() != nullptr) {
// "Initialize".
// StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
// Only Throwable has "custom" fields:
@@ -793,57 +799,54 @@
// detailMessage.
// TODO: Use String::FromModifiedUTF...?
ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
- if (s.get() != nullptr) {
- env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
-
- // cause.
- env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
-
- // suppressedExceptions.
- ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
- WellKnownClasses::java_util_Collections,
- WellKnownClasses::java_util_Collections_EMPTY_LIST));
- CHECK(emptylist.get() != nullptr);
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_suppressedExceptions,
- emptylist.get());
-
- // stackState is set as result of fillInStackTrace. fillInStackTrace calls
- // nativeFillInStackTrace.
- ScopedLocalRef<jobject> stack_state_val(env, nullptr);
- {
- ScopedObjectAccessUnchecked soa(env);
- stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
- }
- if (stack_state_val.get() != nullptr) {
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_stackState,
- stack_state_val.get());
-
- // stackTrace.
- ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
- WellKnownClasses::libcore_util_EmptyArray,
- WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
- env->SetObjectField(exc.get(),
- WellKnownClasses::java_lang_Throwable_stackTrace,
- stack_trace_elem.get());
- } else {
- error_msg = "Could not create stack trace.";
- }
- // Throw the exception.
- self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
- } else {
- // Could not allocate a string object.
- error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+ if (s == nullptr) {
+ LOG(WARNING) << "Could not throw new StackOverflowError because JNI NewStringUTF failed.";
+ return;
}
- } else {
- error_msg = "Could not allocate StackOverflowError object.";
- }
- if (!error_msg.empty()) {
- LOG(WARNING) << error_msg;
- CHECK(self->IsExceptionPending());
- }
+ env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get());
+
+ // cause.
+ env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get());
+
+ // suppressedExceptions.
+ ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+ WellKnownClasses::java_util_Collections,
+ WellKnownClasses::java_util_Collections_EMPTY_LIST));
+ CHECK(emptylist != nullptr);
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_suppressedExceptions,
+ emptylist.get());
+
+ // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+ // nativeFillInStackTrace.
+ ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+ {
+ ScopedObjectAccessUnchecked soa(env); // TODO: Is this necessary?
+ stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+ }
+ if (stack_state_val != nullptr) {
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_stackState,
+ stack_state_val.get());
+
+ // stackTrace.
+ ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+ WellKnownClasses::libcore_util_EmptyArray,
+ WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT));
+ env->SetObjectField(exc.get(),
+ WellKnownClasses::java_lang_Throwable_stackTrace,
+ stack_trace_elem.get());
+ } else {
+ LOG(WARNING) << "Could not create stack trace.";
+ // Note: we'll create an exception without stack state, which is valid.
+ }
+
+ // Throw the exception.
+ self->SetException(self->DecodeJObject(exc.get())->AsThrowable());
+ };
+ create_and_throw();
+ CHECK(self->IsExceptionPending());
bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
self->ResetDefaultStackEnd(); // Return to default stack size.
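The create_and_throw lambda introduced above trades nested if/else for early returns while still guaranteeing that the ScopedLocalRef destructors run before ResetDefaultStackEnd(). The same pattern in miniature, with a generic RAII type standing in for ScopedLocalRef (all names below are illustrative):

#include <iostream>

struct ScopedResource {  // Stand-in for ScopedLocalRef: releases in its destructor.
  const char* name;
  explicit ScopedResource(const char* n) : name(n) { std::cout << "acquire " << name << "\n"; }
  ~ScopedResource() { std::cout << "release " << name << "\n"; }
};

bool TryAllocate(bool should_fail) { return !should_fail; }

void BuildAndThrow(bool fail_midway) {
  // Packing the fallible steps into a lambda lets each failure return early;
  // every ScopedResource is destroyed when the lambda exits, before the code
  // that follows the call (in ART: before the stack end is reset).
  auto create_and_throw = [&]() {
    ScopedResource exc("exception object");
    if (!TryAllocate(fail_midway)) {
      std::cout << "could not allocate, bail out\n";
      return;
    }
    ScopedResource msg("detail message");
    std::cout << "throw\n";
  };
  create_and_throw();
  std::cout << "stack reset happens here, after all releases\n";
}

int main() {
  BuildAndThrow(/*fail_midway=*/ false);
  BuildAndThrow(/*fail_midway=*/ true);
}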
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 6855dcd..b29eb70 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -51,10 +51,6 @@
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
- // Return true if we should attempt to relocate to a random base address if we have not already
- // done so. Return false if relocating in this way would be problematic.
- virtual bool IsRelocationPossible() = 0;
-
virtual verifier::VerifierDeps* GetVerifierDeps() const = 0;
virtual void SetVerifierDeps(verifier::VerifierDeps* deps ATTRIBUTE_UNUSED) {}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 366b5ec..b679cbe 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -256,17 +256,6 @@
<< " " << dex_pc << ", " << dex_pc_offset;
}
- // We only care about invokes in the Jit.
- void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
- ArtMethod* method,
- uint32_t dex_pc,
- ArtMethod* target ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(Locks::mutator_lock_) {
- LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
- << " " << dex_pc;
- }
-
// TODO Might be worth it to post ExceptionCatch event.
void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
@@ -897,7 +886,7 @@
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (!GetMethod()->IsRuntimeMethod()) {
Monitor::VisitLocks(this, AppendOwnedMonitors, this);
++current_stack_depth;
@@ -1667,18 +1656,6 @@
}
void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
- struct DebugCallbackContext {
- int numItems;
- JDWP::ExpandBuf* pReply;
-
- static bool Callback(void* context, const DexFile::PositionInfo& entry) {
- DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
- expandBufAdd8BE(pContext->pReply, entry.address_);
- expandBufAdd4BE(pContext->pReply, entry.line_);
- pContext->numItems++;
- return false;
- }
- };
ArtMethod* m = FromMethodId(method_id);
CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
uint64_t start, end;
@@ -1699,52 +1676,19 @@
size_t numLinesOffset = expandBufGetLength(pReply);
expandBufAdd4BE(pReply, 0);
- DebugCallbackContext context;
- context.numItems = 0;
- context.pReply = pReply;
+ int numItems = 0;
+ accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ expandBufAdd8BE(pReply, entry.address_);
+ expandBufAdd4BE(pReply, entry.line_);
+ numItems++;
+ return false;
+ });
- if (accessor.HasCodeItem()) {
- m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(),
- DebugCallbackContext::Callback,
- &context);
- }
-
- JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
+ JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, numItems);
}
void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
JDWP::ExpandBuf* pReply) {
- struct DebugCallbackContext {
- ArtMethod* method;
- JDWP::ExpandBuf* pReply;
- size_t variable_count;
- bool with_generic;
-
- static void Callback(void* context, const DexFile::LocalInfo& entry)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
-
- uint16_t slot = entry.reg_;
- VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
- pContext->variable_count, entry.start_address_,
- entry.end_address_ - entry.start_address_,
- entry.name_, entry.descriptor_, entry.signature_, slot,
- MangleSlot(slot, pContext->method));
-
- slot = MangleSlot(slot, pContext->method);
-
- expandBufAdd8BE(pContext->pReply, entry.start_address_);
- expandBufAddUtf8String(pContext->pReply, entry.name_);
- expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
- if (pContext->with_generic) {
- expandBufAddUtf8String(pContext->pReply, entry.signature_);
- }
- expandBufAdd4BE(pContext->pReply, entry.end_address_- entry.start_address_);
- expandBufAdd4BE(pContext->pReply, slot);
-
- ++pContext->variable_count;
- }
- };
ArtMethod* m = FromMethodId(method_id);
CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
@@ -1756,24 +1700,39 @@
size_t variable_count_offset = expandBufGetLength(pReply);
expandBufAdd4BE(pReply, 0);
- DebugCallbackContext context;
- context.method = m;
- context.pReply = pReply;
- context.variable_count = 0;
- context.with_generic = with_generic;
+ size_t variable_count = 0;
if (accessor.HasCodeItem()) {
- m->GetDexFile()->DecodeDebugLocalInfo(accessor.RegistersSize(),
- accessor.InsSize(),
- accessor.InsnsSizeInCodeUnits(),
- accessor.DebugInfoOffset(),
- m->IsStatic(),
- m->GetDexMethodIndex(),
- DebugCallbackContext::Callback,
- &context);
+ accessor.DecodeDebugLocalInfo(m->IsStatic(),
+ m->GetDexMethodIndex(),
+ [&](const DexFile::LocalInfo& entry)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint16_t slot = entry.reg_;
+ VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
+ variable_count,
+ entry.start_address_,
+ entry.end_address_ - entry.start_address_,
+ entry.name_,
+ entry.descriptor_, entry.signature_,
+ slot,
+ MangleSlot(slot, m));
+
+ slot = MangleSlot(slot, m);
+
+ expandBufAdd8BE(pReply, entry.start_address_);
+ expandBufAddUtf8String(pReply, entry.name_);
+ expandBufAddUtf8String(pReply, entry.descriptor_);
+ if (with_generic) {
+ expandBufAddUtf8String(pReply, entry.signature_);
+ }
+ expandBufAdd4BE(pReply, entry.end_address_- entry.start_address_);
+ expandBufAdd4BE(pReply, slot);
+
+ ++variable_count;
+ });
}
- JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
+ JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, variable_count);
}
void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
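Both debugger hunks above replace the C-style decode callbacks (a static member function plus a void* context struct) with visitors that take a lambda capturing the locals directly. A compressed before/after of that refactor, using a made-up Entry type and decoders rather than the DexFile/CodeItemDebugInfoAccessor API:

#include <cstdint>
#include <iostream>
#include <vector>

struct Entry { uint32_t address; uint32_t line; };  // Hypothetical position entry.

const std::vector<Entry>& Table() {
  static const std::vector<Entry> t = {{0, 10}, {4, 11}, {8, 11}};
  return t;
}

// Old shape: C-style callback with an explicit context pointer.
void DecodeWithCallback(bool (*cb)(void* ctx, const Entry&), void* ctx) {
  for (const Entry& e : Table()) {
    if (cb(ctx, e)) return;  // Callback returns true to stop early.
  }
}

// New shape: the visitor is a template parameter; lambda captures replace the context struct.
template <typename Visitor>
void DecodeWithLambda(Visitor&& visit) {
  for (const Entry& e : Table()) {
    if (visit(e)) return;
  }
}

int main() {
  struct Context { int num_items = 0; } context;
  DecodeWithCallback([](void* raw, const Entry&) {
    static_cast<Context*>(raw)->num_items++;
    return false;
  }, &context);

  int num_items = 0;
  DecodeWithLambda([&](const Entry&) { num_items++; return false; });

  std::cout << context.num_items << " == " << num_items << "\n";
}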
@@ -2406,7 +2365,7 @@
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (!GetMethod()->IsRuntimeMethod()) {
++depth;
}
@@ -2576,7 +2535,7 @@
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (frame_id != GetFrameId()) {
return true; // continue
} else {
@@ -2618,7 +2577,7 @@
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
if (GetFrameId() != frame_id_) {
return true; // Not our frame, carry on.
}
@@ -3831,7 +3790,7 @@
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
- bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* m = GetMethod();
if (!m->IsRuntimeMethod()) {
++stack_depth;
@@ -3855,50 +3814,6 @@
SingleStepStackVisitor visitor(thread);
visitor.WalkStack();
- // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
- struct DebugCallbackContext {
- DebugCallbackContext(SingleStepControl* single_step_control_cb,
- int32_t line_number_cb, uint32_t num_insns_in_code_units)
- : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
- num_insns_in_code_units_(num_insns_in_code_units), last_pc_valid(false), last_pc(0) {
- }
-
- static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
- DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
- if (static_cast<int32_t>(entry.line_) == context->line_number_) {
- if (!context->last_pc_valid) {
- // Everything from this address until the next line change is ours.
- context->last_pc = entry.address_;
- context->last_pc_valid = true;
- }
- // Otherwise, if we're already in a valid range for this line,
- // just keep going (shouldn't really happen)...
- } else if (context->last_pc_valid) { // and the line number is new
- // Add everything from the last entry up until here to the set
- for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
- context->single_step_control_->AddDexPc(dex_pc);
- }
- context->last_pc_valid = false;
- }
- return false; // There may be multiple entries for any given line.
- }
-
- ~DebugCallbackContext() {
- // If the line number was the last in the position table...
- if (last_pc_valid) {
- for (uint32_t dex_pc = last_pc; dex_pc < num_insns_in_code_units_; ++dex_pc) {
- single_step_control_->AddDexPc(dex_pc);
- }
- }
- }
-
- SingleStepControl* const single_step_control_;
- const int32_t line_number_;
- const uint32_t num_insns_in_code_units_;
- bool last_pc_valid;
- uint32_t last_pc;
- };
-
// Allocate single step.
SingleStepControl* single_step_control =
new (std::nothrow) SingleStepControl(step_size, step_depth,
@@ -3914,10 +3829,33 @@
// method on the stack (and no line number either).
if (m != nullptr && !m->IsNative()) {
CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
- DebugCallbackContext context(single_step_control, line_number, accessor.InsnsSizeInCodeUnits());
- m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(),
- DebugCallbackContext::Callback,
- &context);
+ bool last_pc_valid = false;
+ uint32_t last_pc = 0u;
+ // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
+ accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ if (static_cast<int32_t>(entry.line_) == line_number) {
+ if (!last_pc_valid) {
+ // Everything from this address until the next line change is ours.
+ last_pc = entry.address_;
+ last_pc_valid = true;
+ }
+ // Otherwise, if we're already in a valid range for this line,
+ // just keep going (shouldn't really happen)...
+ } else if (last_pc_valid) { // and the line number is new
+ // Add everything from the last entry up until here to the set
+ for (uint32_t dex_pc = last_pc; dex_pc < entry.address_; ++dex_pc) {
+ single_step_control->AddDexPc(dex_pc);
+ }
+ last_pc_valid = false;
+ }
+ return false; // There may be multiple entries for any given line.
+ });
+ // If the line number was the last in the position table...
+ if (last_pc_valid) {
+ for (uint32_t dex_pc = last_pc; dex_pc < accessor.InsnsSizeInCodeUnits(); ++dex_pc) {
+ single_step_control->AddDexPc(dex_pc);
+ }
+ }
}
// Activate single-step in the thread.
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index b87bf8d..b50a430 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -209,7 +209,7 @@
case DexFile::kDexAnnotationArray:
{
uint32_t size = DecodeUnsignedLeb128(&annotation);
- while (size--) {
+ for (; size != 0u; --size) {
if (!SkipAnnotationValue(dex_file, &annotation)) {
return false;
}
@@ -221,7 +221,7 @@
{
DecodeUnsignedLeb128(&annotation); // unused type_index
uint32_t size = DecodeUnsignedLeb128(&annotation);
- while (size--) {
+ for (; size != 0u; --size) {
DecodeUnsignedLeb128(&annotation); // unused element_name_index
if (!SkipAnnotationValue(dex_file, &annotation)) {
return false;
@@ -1578,9 +1578,9 @@
DCHECK(accessor.HasCodeItem()) << method->PrettyMethod() << " " << dex_file->GetLocation();
// A method with no line number info should return -1
- DexFile::LineNumFromPcContext context(rel_pc, -1);
- dex_file->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), DexFile::LineNumForPcCb, &context);
- return context.line_num_;
+ uint32_t line_num = -1;
+ accessor.GetLineNumForPc(rel_pc, &line_num);
+ return line_num;
}
template<bool kTransactionActive>
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 93af77f..429ecd3 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -39,8 +39,6 @@
void DexoptTest::PreRuntimeCreate() {
std::string error_msg;
- ASSERT_TRUE(PreRelocateImage(GetImageLocation(), &error_msg)) << error_msg;
- ASSERT_TRUE(PreRelocateImage(GetImageLocation2(), &error_msg)) << error_msg;
UnreserveImageSpace();
}
@@ -89,25 +87,12 @@
}
void DexoptTest::GenerateOatForTest(const std::string& dex_location,
- const std::string& oat_location_in,
+ const std::string& oat_location,
CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
bool with_alternate_image,
const char* compilation_reason) {
std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
- std::string oat_location = oat_location_in;
- if (!relocate) {
- // Temporarily redirect the dalvik cache so dex2oat doesn't find the
- // relocated image file.
- ASSERT_EQ(0, rename(dalvik_cache.c_str(), dalvik_cache_tmp.c_str())) << strerror(errno);
- // If the oat location is in dalvik cache, replace the cache path with the temporary one.
- size_t pos = oat_location.find(dalvik_cache);
- if (pos != std::string::npos) {
- oat_location = oat_location.replace(pos, dalvik_cache.length(), dalvik_cache_tmp);
- }
- }
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
@@ -125,10 +110,6 @@
args.push_back("--profile-file=" + profile_file.GetFilename());
}
- if (pic) {
- args.push_back("--compile-pic");
- }
-
std::string image_location = GetImageLocation();
if (with_alternate_image) {
args.push_back("--boot-image=" + GetImageLocation2());
@@ -141,24 +122,16 @@
std::string error_msg;
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- if (!relocate) {
- // Restore the dalvik cache if needed.
- ASSERT_EQ(0, rename(dalvik_cache_tmp.c_str(), dalvik_cache.c_str())) << strerror(errno);
- oat_location = oat_location_in;
- }
-
// Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- EXPECT_EQ(pic, odex_file->IsPic());
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
std::unique_ptr<ImageHeader> image_header(
@@ -176,51 +149,22 @@
EXPECT_EQ(combined_checksum, oat_header.GetImageFileLocationOatChecksum());
}
}
-
- if (!with_alternate_image) {
- if (CompilerFilter::IsAotCompilationEnabled(filter)) {
- if (relocate) {
- EXPECT_EQ(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
- oat_header.GetImageFileLocationOatDataBegin());
- EXPECT_EQ(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
- } else {
- EXPECT_NE(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
- oat_header.GetImageFileLocationOatDataBegin());
- EXPECT_NE(image_header->GetPatchDelta(), oat_header.GetImagePatchDelta());
- }
- }
- }
}
void DexoptTest::GenerateOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter) {
+ const std::string& odex_location,
+ CompilerFilter::Filter filter,
+ const char* compilation_reason) {
GenerateOatForTest(dex_location,
odex_location,
filter,
- /*relocate*/false,
- /*pic*/false,
- /*with_alternate_image*/false);
-}
-
-void DexoptTest::GeneratePicOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter,
- const char* compilation_reason) {
- GenerateOatForTest(dex_location,
- odex_location,
- filter,
- /*relocate*/false,
- /*pic*/true,
- /*with_alternate_image*/false,
+ /*with_alternate_image=*/ false,
compilation_reason);
}
void DexoptTest::GenerateOatForTest(const char* dex_location,
- CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
- bool with_alternate_image) {
+ CompilerFilter::Filter filter,
+ bool with_alternate_image) {
std::string oat_location;
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
@@ -228,45 +172,11 @@
GenerateOatForTest(dex_location,
oat_location,
filter,
- relocate,
- pic,
with_alternate_image);
}
void DexoptTest::GenerateOatForTest(const char* dex_location, CompilerFilter::Filter filter) {
- GenerateOatForTest(dex_location,
- filter,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/false);
-}
-
-bool DexoptTest::PreRelocateImage(const std::string& image_location, std::string* error_msg) {
- std::string dalvik_cache;
- bool have_android_data;
- bool dalvik_cache_exists;
- bool is_global_cache;
- GetDalvikCache(GetInstructionSetString(kRuntimeISA),
- true,
- &dalvik_cache,
- &have_android_data,
- &dalvik_cache_exists,
- &is_global_cache);
- if (!dalvik_cache_exists) {
- *error_msg = "Failed to create dalvik cache";
- return false;
- }
-
- std::string patchoat = GetAndroidRoot();
- patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
- argv.push_back("--input-image-location=" + image_location);
- argv.push_back("--output-image-directory=" + dalvik_cache);
- argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
- argv.push_back("--base-offset-delta=0x00008000");
- return Exec(argv, error_msg);
+ GenerateOatForTest(dex_location, filter, /*with_alternate_image=*/ false);
}
void DexoptTest::ReserveImageSpace() {
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index 5dff379..efbdcba 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -36,36 +36,24 @@
// The oat file will be generated for dex_location in the given oat_location
// with the following configuration:
// filter - controls the compilation filter
- // pic - whether or not the code will be PIC
- // relocate - if true, the oat file will be relocated with respect to the
- // boot image. Otherwise the oat file will not be relocated.
// with_alternate_image - if true, the oat file will be generated with an
// image checksum different than the current image checksum.
void GenerateOatForTest(const std::string& dex_location,
const std::string& oat_location,
CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
bool with_alternate_image,
const char* compilation_reason = nullptr);
- // Generate a non-PIC odex file for the purposes of test.
- // The generated odex file will be un-relocated.
+ // Generate an odex file for the purposes of test.
void GenerateOdexForTest(const std::string& dex_location,
const std::string& odex_location,
- CompilerFilter::Filter filter);
-
- void GeneratePicOdexForTest(const std::string& dex_location,
- const std::string& odex_location,
- CompilerFilter::Filter filter,
- const char* compilation_reason = nullptr);
+ CompilerFilter::Filter filter,
+ const char* compilation_reason = nullptr);
// Generate an oat file for the given dex location in its oat location (under
// the dalvik cache).
void GenerateOatForTest(const char* dex_location,
CompilerFilter::Filter filter,
- bool relocate,
- bool pic,
bool with_alternate_image);
// Generate a standard oat file in the oat location.
@@ -74,11 +62,6 @@
static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
private:
- // Pre-Relocate the image to a known non-zero offset so we don't have to
- // deal with the runtime randomly relocating the image by 0 and messing up
- // the expected results of the tests.
- bool PreRelocateImage(const std::string& image_location, std::string* error_msg);
-
// Reserve memory around where the image will be loaded so other memory
// won't conflict when it comes time to load the image.
// This can be called with an already loaded image to reserve the space
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e7715c4..ce742fe 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1417,7 +1417,7 @@
void ElfFileImpl<ElfTypes>::ApplyOatPatches(
const uint8_t* patches, const uint8_t* patches_end, Elf_Addr delta,
uint8_t* to_patch, const uint8_t* to_patch_end) {
- typedef __attribute__((__aligned__(1))) Elf_Addr UnalignedAddress;
+ using UnalignedAddress __attribute__((__aligned__(1))) = Elf_Addr;
while (patches < patches_end) {
to_patch += DecodeUnsignedLeb128(&patches);
DCHECK_LE(patches, patches_end) << "Unexpected end of patch list.";
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index e6f3d0b..35bfa91 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -93,15 +93,17 @@
// even going back from boot image methods to the same oat file. However, this is
// not currently implemented in the compiler. Therefore crossing dex file boundary
// indicates that the inlined definition is not the same as the one used at runtime.
- LOG(FATAL) << "Inlined method resolution crossed dex file boundary: from "
- << method->PrettyMethod()
- << " in " << method->GetDexFile()->GetLocation() << "/"
- << static_cast<const void*>(method->GetDexFile())
- << " to " << inlined_method->PrettyMethod()
- << " in " << inlined_method->GetDexFile()->GetLocation() << "/"
- << static_cast<const void*>(inlined_method->GetDexFile()) << ". "
- << "This must be due to duplicate classes or playing wrongly with class loaders";
- UNREACHABLE();
+ bool target_sdk_pre_p = Runtime::Current()->GetTargetSdkVersion() < 28;
+ LOG(target_sdk_pre_p ? WARNING : FATAL)
+ << "Inlined method resolution crossed dex file boundary: from "
+ << method->PrettyMethod()
+ << " in " << method->GetDexFile()->GetLocation() << "/"
+ << static_cast<const void*>(method->GetDexFile())
+ << " to " << inlined_method->PrettyMethod()
+ << " in " << inlined_method->GetDexFile()->GetLocation() << "/"
+ << static_cast<const void*>(inlined_method->GetDexFile()) << ". "
+ << "This must be due to duplicate classes or playing wrongly with class loaders. "
+ << "The runtime is in an unsafe state.";
}
method = inlined_method;
}
@@ -317,20 +319,9 @@
ArtMethod* referrer,
Thread* self,
size_t expected_size) {
- bool is_primitive;
- bool is_set;
- bool is_static;
- switch (type) {
- case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break;
- case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break;
- case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break;
- case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break;
- case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break;
- case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break;
- case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break;
- case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through.
- default: is_primitive = true; is_set = true; is_static = true; break;
- }
+ constexpr bool is_primitive = (type & FindFieldFlags::PrimitiveBit) != 0;
+ constexpr bool is_set = (type & FindFieldFlags::WriteBit) != 0;
+ constexpr bool is_static = (type & FindFieldFlags::StaticBit) != 0;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field;
@@ -431,28 +422,17 @@
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
+// Follow virtual/interface indirections if applicable.
+// Will throw a null-pointer exception if the object is null.
template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self) {
+ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
+ ArtMethod* resolved_method,
+ ObjPtr<mirror::Object>* this_object,
+ ArtMethod* referrer,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- ArtMethod* resolved_method;
- if (type == kStatic) {
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- } else {
- StackHandleScope<1> hs(self);
- HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- }
- if (UNLIKELY(resolved_method == nullptr)) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
- }
- // Next, null pointer check.
+ // Null pointer check.
if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
resolved_method->IsConstructor())) {
@@ -550,7 +530,13 @@
UNREACHABLE();
}
case kInterface: {
- uint32_t imt_index = ImTable::GetImtIndex(resolved_method);
+ size_t imt_index;
+ InterpreterCache* tls_cache = self->GetInterpreterCache();
+ if (UNLIKELY(!tls_cache->Get(resolved_method, &imt_index))) {
+ imt_index = ImTable::GetImtIndex(resolved_method);
+ tls_cache->Set(resolved_method, imt_index);
+ }
+ DCHECK_EQ(imt_index, ImTable::GetImtIndex(resolved_method));
PointerSize pointer_size = class_linker->GetImagePointerSize();
ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
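The kInterface path above now memoizes ImTable::GetImtIndex in the thread's InterpreterCache: look up, compute on miss, store, with a DCHECK that the cached value matches a recomputation. The caching shape in isolation, using a hypothetical direct-mapped thread-local cache rather than ART's InterpreterCache layout:

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct Method {};  // Stand-in for ArtMethod.

// Hypothetical expensive computation standing in for ImTable::GetImtIndex.
size_t ComputeImtIndex(const Method* m) {
  return reinterpret_cast<uintptr_t>(m) % 43;
}

// Direct-mapped cache: each slot holds one (key, value) pair; collisions just overwrite.
class TlsCache {
 public:
  bool Get(const void* key, size_t* value) const {
    const Slot& slot = slots_[Hash(key)];
    if (slot.key == key) { *value = slot.value; return true; }
    return false;
  }
  void Set(const void* key, size_t value) { slots_[Hash(key)] = {key, value}; }
 private:
  struct Slot { const void* key = nullptr; size_t value = 0; };
  static size_t Hash(const void* key) { return (reinterpret_cast<uintptr_t>(key) >> 4) % kSize; }
  static constexpr size_t kSize = 256;
  std::array<Slot, kSize> slots_;
};

thread_local TlsCache tls_cache;  // One cache per thread, so no locking is needed.

size_t GetImtIndexCached(const Method* m) {
  size_t imt_index;
  if (!tls_cache.Get(m, &imt_index)) {   // Slow path: compute and remember.
    imt_index = ComputeImtIndex(m);
    tls_cache.Set(m, imt_index);
  }
  return imt_index;                      // Fast path on the next call for the same method.
}

int main() {
  Method m;
  std::cout << GetImtIndexCached(&m) << " " << GetImtIndexCached(&m) << "\n";
}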
@@ -581,6 +567,31 @@
}
}
+template<InvokeType type, bool access_check>
+inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
+ ObjPtr<mirror::Object>* this_object,
+ ArtMethod* referrer,
+ Thread* self) {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ constexpr ClassLinker::ResolveMode resolve_mode =
+ access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+ : ClassLinker::ResolveMode::kNoChecks;
+ ArtMethod* resolved_method;
+ if (type == kStatic) {
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+ } else {
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
+ }
+ if (UNLIKELY(resolved_method == nullptr)) {
+ DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
+ return nullptr; // Failure.
+ }
+ return FindMethodToCall<type, access_check>(
+ method_idx, resolved_method, this_object, referrer, self);
+}
+
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
@@ -611,22 +622,9 @@
return nullptr;
}
// Check for incompatible class change.
- bool is_primitive;
- bool is_set;
- bool is_static;
- switch (type) {
- case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break;
- case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break;
- case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break;
- case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break;
- case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break;
- case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break;
- case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break;
- case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
+ const bool is_primitive = (type & FindFieldFlags::PrimitiveBit) != 0;
+ const bool is_set = (type & FindFieldFlags::WriteBit) != 0;
+ const bool is_static = (type & FindFieldFlags::StaticBit) != 0;
if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
// Incompatible class change.
return nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 9d70b03..c8bf6d0 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -103,16 +103,25 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
+enum FindFieldFlags {
+ InstanceBit = 1 << 0,
+ StaticBit = 1 << 1,
+ ObjectBit = 1 << 2,
+ PrimitiveBit = 1 << 3,
+ ReadBit = 1 << 4,
+ WriteBit = 1 << 5,
+};
+
// Type of find field operation for fast and slow case.
enum FindFieldType {
- InstanceObjectRead,
- InstanceObjectWrite,
- InstancePrimitiveRead,
- InstancePrimitiveWrite,
- StaticObjectRead,
- StaticObjectWrite,
- StaticPrimitiveRead,
- StaticPrimitiveWrite,
+ InstanceObjectRead = InstanceBit | ObjectBit | ReadBit,
+ InstanceObjectWrite = InstanceBit | ObjectBit | WriteBit,
+ InstancePrimitiveRead = InstanceBit | PrimitiveBit | ReadBit,
+ InstancePrimitiveWrite = InstanceBit | PrimitiveBit | WriteBit,
+ StaticObjectRead = StaticBit | ObjectBit | ReadBit,
+ StaticObjectWrite = StaticBit | ObjectBit | WriteBit,
+ StaticPrimitiveRead = StaticBit | PrimitiveBit | ReadBit,
+ StaticPrimitiveWrite = StaticBit | PrimitiveBit | WriteBit,
};
template<FindFieldType type, bool access_check>
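Because each FindFieldType value is now a union of FindFieldFlags bits, the eight-way switches elsewhere in this change collapse into bit tests that fold at compile time when the type is a template parameter. A freestanding illustration of the encode/decode (the enumerator names below are local to the sketch, not ART's):

#include <iostream>

enum FieldFlags {
  kInstanceBit  = 1 << 0,
  kStaticBit    = 1 << 1,
  kObjectBit    = 1 << 2,
  kPrimitiveBit = 1 << 3,
  kReadBit      = 1 << 4,
  kWriteBit     = 1 << 5,
};

// Each operation kind is just a union of the orthogonal flag bits.
enum FieldOp {
  kInstanceObjectRead   = kInstanceBit | kObjectBit    | kReadBit,
  kStaticPrimitiveWrite = kStaticBit   | kPrimitiveBit | kWriteBit,
};

template <FieldOp type>
void Describe() {
  // The bit tests are constant-folded because `type` is a template parameter,
  // replacing the old eight-way switch over the enumerators.
  constexpr bool is_primitive = (type & kPrimitiveBit) != 0;
  constexpr bool is_set = (type & kWriteBit) != 0;
  constexpr bool is_static = (type & kStaticBit) != 0;
  std::cout << "primitive=" << is_primitive
            << " write=" << is_set
            << " static=" << is_static << "\n";
}

int main() {
  Describe<kInstanceObjectRead>();    // primitive=0 write=0 static=0
  Describe<kStaticPrimitiveWrite>();  // primitive=1 write=1 static=1
}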
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 62cc9de..d38e3ed 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -28,13 +28,6 @@
namespace art {
-inline constexpr bool FindFieldTypeIsRead(FindFieldType type) {
- return type == InstanceObjectRead ||
- type == InstancePrimitiveRead ||
- type == StaticObjectRead ||
- type == StaticPrimitiveRead;
-}
-
// Helper function to do a null check after trying to resolve the field. Not for statics since obj
// does not exist there. There is a suspend check, object is a double pointer to update the value
// in the caller in case it moves.
@@ -50,7 +43,7 @@
HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
if (LIKELY(field != nullptr) && UNLIKELY(h == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/FindFieldTypeIsRead(type));
+ ThrowNullPointerExceptionForFieldAccess(field, (type & FindFieldFlags::ReadBit) != 0);
return nullptr;
}
return field;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index fccfce4..84631c3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -753,6 +753,7 @@
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
JValue result;
+ bool force_frame_pop = false;
if (UNLIKELY(deopt_frame != nullptr)) {
HandleDeoptimization(&result, method, deopt_frame, &fragment);
@@ -788,6 +789,7 @@
}
result = interpreter::EnterInterpreterFromEntryPoint(self, accessor, shadow_frame);
+ force_frame_pop = shadow_frame->GetForcePopFrame();
}
// Pop transition.
@@ -804,12 +806,20 @@
LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
<< caller->PrettyMethod();
} else {
+ VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
+ << " to " << caller->PrettyMethod()
+ << (force_frame_pop ? " for frame-pop" : "");
+ DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
+ if (force_frame_pop && self->GetException() != nullptr) {
+ LOG(WARNING) << "Suppressing exception for instruction-retry: "
+ << self->GetException()->Dump();
+ }
// Push the context of the deoptimization stack so we can restore the return value and the
// exception before executing the deoptimized frames.
self->PushDeoptimizationContext(
result,
shorty[0] == 'L' || shorty[0] == '[', /* class or array */
- self->GetException(),
+ force_frame_pop ? nullptr : self->GetException(),
false /* from_code */,
DeoptimizationMethodType::kDefault);
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cb85804..f451978 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -127,9 +127,7 @@
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, jni_entrypoints, sizeof(size_t));
// Skip across the entrypoints structures.
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, rosalloc_runs, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, rosalloc_runs, thread_local_alloc_stack_top,
sizeof(void*) * kNumRosAllocThreadLocalSizeBracketsInThread);
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
@@ -140,8 +138,11 @@
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
- EXPECT_OFFSET_DIFF(Thread, tlsPtr_.async_exception, Thread, wait_mutex_, sizeof(void*),
- thread_tlsptr_end);
+ // The first field after tlsPtr_ is forced to a 16 byte alignment so it might have some space.
+ auto offset_tlsptr_end = OFFSETOF_MEMBER(Thread, tlsPtr_) +
+ sizeof(decltype(reinterpret_cast<Thread*>(16)->tlsPtr_));
+ CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.async_exception) == sizeof(void*),
+ "async_exception last field");
}
void CheckJniEntryPoints() {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0562167..bf26aea 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -830,16 +830,16 @@
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
- static size_t RunFreeListOffset() {
+ static constexpr size_t RunFreeListOffset() {
return OFFSETOF_MEMBER(Run, free_list_);
}
- static size_t RunFreeListHeadOffset() {
+ static constexpr size_t RunFreeListHeadOffset() {
return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
}
- static size_t RunFreeListSizeOffset() {
+ static constexpr size_t RunFreeListSizeOffset() {
return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
}
- static size_t RunSlotNextOffset() {
+ static constexpr size_t RunSlotNextOffset() {
return OFFSETOF_MEMBER(Slot, next_);
}
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 2f1f577..992c32a 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -23,15 +23,19 @@
namespace gc {
// Different types of allocators.
+// Those marked with * have fast path entrypoints callable from generated code.
enum AllocatorType {
- kAllocatorTypeBumpPointer, // Use BumpPointer allocator, has entrypoints.
- kAllocatorTypeTLAB, // Use TLAB allocator, has entrypoints.
- kAllocatorTypeRosAlloc, // Use RosAlloc allocator, has entrypoints.
- kAllocatorTypeDlMalloc, // Use dlmalloc allocator, has entrypoints.
- kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
- kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
- kAllocatorTypeRegion,
- kAllocatorTypeRegionTLAB,
+ // BumpPointer spaces are currently only used for ZygoteSpace construction.
+ kAllocatorTypeBumpPointer, // Use global CAS-based BumpPointer allocator. (*)
+ kAllocatorTypeTLAB, // Use TLAB allocator within BumpPointer space. (*)
+ kAllocatorTypeRosAlloc, // Use RosAlloc (segregated size, free list) allocator. (*)
+ kAllocatorTypeDlMalloc, // Use dlmalloc (well-known C malloc) allocator. (*)
+ kAllocatorTypeNonMoving, // Special allocator for non moving objects.
+ kAllocatorTypeLOS, // Large object space.
+ // The following differ from the BumpPointer allocators primarily in that memory is
+ // allocated from multiple regions, instead of a single contiguous space.
+ kAllocatorTypeRegion, // Use CAS-based contiguous bump-pointer allocation within a region. (*)
+ kAllocatorTypeRegionTLAB, // Use region pieces as TLABs. Default for most small objects. (*)
};
std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index fc23ab8..46cc79c 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -406,7 +406,7 @@
concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
reinterpret_cast<Atomic<size_t>*>(
&concurrent_copying_->from_space_num_objects_at_first_pause_)->
- fetch_add(thread_local_objects, std::memory_order_seq_cst);
+ fetch_add(thread_local_objects, std::memory_order_relaxed);
} else {
concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
}
@@ -423,7 +423,7 @@
void VisitRoots(mirror::Object*** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED)
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
@@ -440,7 +440,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
- const RootInfo& info ATTRIBUTE_UNUSED)
+ const RootInfo& info ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
@@ -573,8 +573,10 @@
if (ref != nullptr) {
if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
// Not immune, must be a zygote large object.
- CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
- Thread::Current(), ref.Ptr()))
+ space::LargeObjectSpace* large_object_space =
+ Runtime::Current()->GetHeap()->GetLargeObjectsSpace();
+ CHECK(large_object_space->Contains(ref.Ptr()) &&
+ large_object_space->IsZygoteLargeObject(Thread::Current(), ref.Ptr()))
<< "Non gray object references non immune, non zygote large object "<< ref << " "
<< mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
<< mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
@@ -643,9 +645,16 @@
void CheckReference(mirror::Object* ref, int32_t offset = -1) const
REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
+ if (ref != nullptr && cc_->region_space_->IsInNewlyAllocatedRegion(ref)) {
+ LOG(FATAL_WITHOUT_ABORT)
<< holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
<< ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
+ LOG(FATAL_WITHOUT_ABORT) << "time=" << cc_->region_space_->Time();
+ constexpr const char* kIndent = " ";
+ LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(holder_.Ptr(), "holder_", kIndent);
+ LOG(FATAL_WITHOUT_ABORT) << cc_->DumpReferenceInfo(ref, "ref", kIndent);
+ LOG(FATAL) << "Unexpected reference to newly allocated region.";
+ }
}
private:
@@ -898,13 +907,8 @@
// during a minor (young-generation) collection:
// - In the case where we run with a boot image, these classes are part of the image space,
// which is an immune space.
- // - In the case where we run without a boot image, these classes are allocated in the region
- // space (main space), but they are not expected to move during a minor collection (this
- // would only happen if those classes were allocated between a major and a minor
- // collections, which is unlikely -- we don't expect any GC to happen before these
- // fundamental classes are initialized). Note that these classes could move during a major
- // collection though, but this is fine: in that case, the whole heap is traced and the card
- // table logic below is not used.
+ // - In the case where we run without a boot image, these classes are allocated in the
+ // non-moving space (see art::ClassLinker::InitWithoutImage).
Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
space->GetMarkBitmap(),
space->Begin(),
@@ -2011,9 +2015,9 @@
const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
- uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst) + bytes_moved_gc_thread_;
+ uint64_t to_bytes = bytes_moved_.load(std::memory_order_relaxed) + bytes_moved_gc_thread_;
cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
- uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst) + objects_moved_gc_thread_;
+ uint64_t to_objects = objects_moved_.load(std::memory_order_relaxed) + objects_moved_gc_thread_;
cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
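The seq_cst-to-relaxed changes above (here and in the thread-flip visitor earlier) rely on these being pure statistics counters: only the increments themselves must not be lost, and the eventual read happens after the GC phase change already synchronizes with the contributing threads. A minimal sketch of that pattern, with a join standing in for the phase transition:

#include <atomic>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

std::atomic<uint64_t> bytes_moved{0};  // Statistics counter; many writers, read later.

void Worker(uint64_t contribution) {
  // Relaxed is enough: nothing else is published through this counter, we only
  // need the increments themselves to be atomic.
  bytes_moved.fetch_add(contribution, std::memory_order_relaxed);
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; ++i) {
    threads.emplace_back(Worker, 100);
  }
  for (std::thread& t : threads) {
    t.join();  // join() synchronizes-with each worker, so the read below sees all adds.
  }
  std::cout << bytes_moved.load(std::memory_order_relaxed) << "\n";  // Prints 800.
  return 0;
}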
@@ -2045,12 +2049,12 @@
<< " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
<< " to_space size=" << region_space_->ToSpaceSize();
LOG(INFO) << "(before) num_bytes_allocated="
- << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
+ << heap_->num_bytes_allocated_.load();
}
RecordFree(ObjectBytePair(freed_objects, freed_bytes));
if (kVerboseMode) {
LOG(INFO) << "(after) num_bytes_allocated="
- << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst);
+ << heap_->num_bytes_allocated_.load();
}
}
@@ -2320,9 +2324,11 @@
void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
mirror::Object* ref) {
+ CHECK(ref != nullptr);
CHECK(!region_space_->HasAddress(ref)) << "obj=" << obj << " ref=" << ref;
// In a non-moving space. Check that the ref is marked.
if (immune_spaces_.ContainsObject(ref)) {
+ // Immune space case.
if (kUseBakerReadBarrier) {
// Immune object may not be gray if called from the GC.
if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
@@ -2336,23 +2342,68 @@
<< " updated_all_immune_objects=" << updated_all_immune_objects;
}
} else {
+ // Non-moving space and large-object space (LOS) cases.
accounting::ContinuousSpaceBitmap* mark_bitmap =
heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
accounting::LargeObjectBitmap* los_bitmap =
heap_mark_bitmap_->GetLargeObjectBitmap(ref);
- bool is_los = mark_bitmap == nullptr;
- if ((!is_los && mark_bitmap->Test(ref)) ||
- (is_los && los_bitmap->Test(ref))) {
- // OK.
- } else {
- // If `ref` is on the allocation stack, then it may not be
- // marked live, but considered marked/alive (but not
- // necessarily on the live stack).
- CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
- << " obj=" << obj
- << " ref=" << ref
- << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
- }
+ bool is_los = (mark_bitmap == nullptr);
+
+ bool marked_in_non_moving_space_or_los =
+ (kUseBakerReadBarrier
+ && kEnableGenerationalConcurrentCopyingCollection
+ && young_gen_
+ && !done_scanning_.load(std::memory_order_acquire))
+ // Don't use the mark bitmap to ensure `ref` is marked: check that the
+ // read barrier state is gray instead. This is to take into account a
+ // potential race between two read barriers on the same reference when the
+ // young-generation collector is still scanning the dirty cards.
+ //
+ // For instance consider two concurrent read barriers on the same GC root
+ // reference during the dirty-card-scanning step of a young-generation
+ // collection. Both threads would call ReadBarrier::BarrierForRoot, which
+ // would:
+ // a. mark the reference (leading to a call to
+ // ConcurrentCopying::MarkNonMoving); then
+      //    b. check the to-space invariant (leading to a call to
+ // ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace -- this
+ // method).
+ //
+ // In this situation, the following race could happen:
+ // 1. Thread A successfully changes `ref`'s read barrier state from
+ // non-gray (white) to gray (with AtomicSetReadBarrierState) in
+ // ConcurrentCopying::MarkNonMoving, then gets preempted.
+ // 2. Thread B also tries to change `ref`'s read barrier state with
+ // AtomicSetReadBarrierState from non-gray to gray in
+ // ConcurrentCopying::MarkNonMoving, but fails, as Thread A already
+ // changed it.
+ // 3. Because Thread B failed the previous CAS, it does *not* set the
+ // bit in the mark bitmap for `ref`.
+ // 4. Thread B checks the to-space invariant and calls
+ // ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace: the bit
+ // is not set in the mark bitmap for `ref`; checking that this bit is
+ // set to check the to-space invariant is therefore not a reliable
+ // test.
+ // 5. (Note that eventually, Thread A will resume its execution and set
+ // the bit for `ref` in the mark bitmap.)
+ ? (ref->GetReadBarrierState() == ReadBarrier::GrayState())
+ // It is safe to use the heap mark bitmap otherwise.
+ : (!is_los && mark_bitmap->Test(ref)) || (is_los && los_bitmap->Test(ref));
+
+ // If `ref` is on the allocation stack, then it may not be
+ // marked live, but considered marked/alive (but not
+ // necessarily on the live stack).
+ CHECK(marked_in_non_moving_space_or_los || IsOnAllocStack(ref))
+ << "Unmarked ref that's not on the allocation stack."
+ << " obj=" << obj
+ << " ref=" << ref
+ << " rb_state=" << ref->GetReadBarrierState()
+ << " is_los=" << std::boolalpha << is_los << std::noboolalpha
+ << " is_marking=" << std::boolalpha << is_marking_ << std::noboolalpha
+ << " young_gen=" << std::boolalpha << young_gen_ << std::noboolalpha
+ << " done_scanning="
+ << std::boolalpha << done_scanning_.load(std::memory_order_acquire) << std::noboolalpha
+ << " self=" << Thread::Current();
}
}
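
The hunk above makes the assertion consult the Baker read-barrier state rather than the mark bitmap while the young-generation pass is still scanning dirty cards. The following standalone sketch (illustrative only, not ART code) shows why the bitmap alone is unreliable in that window: when two threads race to mark the same reference, only the CAS winner ever sets the bitmap bit.

    #include <atomic>
    #include <cassert>
    #include <thread>

    // Stand-ins for the Baker read-barrier state and the mark-bitmap bit.
    enum State : int { kNonGray = 0, kGray = 1 };
    std::atomic<int> rb_state{kNonGray};
    std::atomic<bool> bitmap_bit{false};

    void Mark() {
      int expected = kNonGray;
      // Mirrors AtomicSetReadBarrierState: only the CAS winner proceeds to
      // publish the mark-bitmap bit; the loser returns without touching it.
      if (rb_state.compare_exchange_strong(expected, kGray)) {
        bitmap_bit.store(true, std::memory_order_release);
      }
    }

    int main() {
      std::thread a(Mark);
      std::thread b(Mark);
      a.join();
      b.join();
      // Once both threads are done, the state and the bitmap agree; in the
      // window between the winner's CAS and its bitmap store, only the state
      // reliably indicates that the object is marked.
      assert(rb_state.load() == kGray && bitmap_bit.load());
      return 0;
    }
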
@@ -2562,15 +2613,15 @@
if (ReadBarrier::kEnableToSpaceInvariantChecks) {
AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
}
- CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
+ CHECK_EQ(byte_size, java_lang_Object_->GetObjectSize<kVerifyNone>());
dummy_obj->SetClass(java_lang_Object_);
CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
} else {
// Use an int array.
dummy_obj->SetClass(int_array_class);
- CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
+ CHECK(dummy_obj->IsArrayInstance<kVerifyNone>());
int32_t length = (byte_size - data_offset) / component_size;
- mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone>();
dummy_arr->SetLength(length);
CHECK_EQ(dummy_arr->GetLength(), length)
<< "byte_size=" << byte_size << " length=" << length
@@ -2680,17 +2731,17 @@
region_space_->RecordAlloc(to_ref);
}
bytes_allocated = region_space_alloc_size;
- heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
- to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_seq_cst);
- to_space_objects_skipped_.fetch_sub(1, std::memory_order_seq_cst);
+ heap_->num_bytes_allocated_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
+ to_space_bytes_skipped_.fetch_sub(bytes_allocated, std::memory_order_relaxed);
+ to_space_objects_skipped_.fetch_sub(1, std::memory_order_relaxed);
} else {
// Fall back to the non-moving space.
fall_back_to_non_moving = true;
if (kVerboseMode) {
LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
- << to_space_bytes_skipped_.load(std::memory_order_seq_cst)
+ << to_space_bytes_skipped_.load(std::memory_order_relaxed)
<< " skipped_objects="
- << to_space_objects_skipped_.load(std::memory_order_seq_cst);
+ << to_space_objects_skipped_.load(std::memory_order_relaxed);
}
to_ref = heap_->non_moving_space_->Alloc(self, obj_size,
&non_moving_space_bytes_allocated, nullptr, &dummy);
@@ -2742,9 +2793,9 @@
region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
- heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
- to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
- to_space_objects_skipped_.fetch_add(1, std::memory_order_seq_cst);
+ heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_relaxed);
+ to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_relaxed);
+ to_space_objects_skipped_.fetch_add(1, std::memory_order_relaxed);
MutexLock mu(self, skipped_blocks_lock_);
skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
reinterpret_cast<uint8_t*>(to_ref)));
@@ -2911,7 +2962,13 @@
// Since the mark bitmap is still filled in from last GC, we can not use that or else the
// mutator may see references to the from space. Instead, use the Baker pointer itself as
// the mark bit.
- if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
+ //
+  // We need to avoid marking objects that are on the allocation stack, as that will lead to a
+ // situation (after this GC cycle is finished) where some object(s) are on both allocation
+ // stack and live bitmap. This leads to visiting the same object(s) twice during a heapdump
+ // (b/117426281).
+ if (!IsOnAllocStack(ref) &&
+ ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
// TODO: We don't actually need to scan this object later, we just need to clear the gray
// bit.
// Also make sure the object is marked.
@@ -2920,6 +2977,8 @@
} else {
mark_bitmap->AtomicTestAndSet(ref);
}
+      // We don't need to mark newly allocated objects (those on the allocation stack) as they can
+      // only point to to-space objects. Also, they are considered live until the next GC cycle.
PushOntoMarkStack(self, ref);
}
return ref;
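
As a reading aid for the guard added above, here is a hypothetical, self-contained sketch (the container types and names are illustrative, not ART's) of the rule it enforces: objects still sitting on the allocation stack are treated as live until the next cycle and must not also be recorded in the live bitmap.

    #include <unordered_set>

    struct Object;

    struct MarkStateSketch {
      std::unordered_set<const Object*> alloc_stack;  // newly allocated objects
      std::unordered_set<const Object*> live_bitmap;  // objects marked by the GC
    };

    // Returns true if this call newly marked the object.
    bool MarkNonMovingSketch(MarkStateSketch& state, const Object* obj) {
      if (state.alloc_stack.count(obj) != 0) {
        // Newly allocated: considered live until the next GC cycle and able to
        // reference only to-space objects, so it is not added to the live bitmap
        // as well (double bookkeeping is what caused the duplicate heapdump visits).
        return false;
      }
      return state.live_bitmap.insert(obj).second;  // mark exactly once
    }
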
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 7bd87bd..3f85c71 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -125,8 +125,6 @@
/*boot_oat_begin*/0u,
/*boot_oat_size*/0u,
/*pointer_size*/sizeof(void*),
- /*compile_pic*/false,
- /*is_pic*/false,
ImageHeader::kStorageModeUncompressed,
/*storage_size*/0u);
return new DummyImageSpace(std::move(map),
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 23b2719..5f44a72 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -789,12 +789,12 @@
mark_stack_[mark_stack_pos_++].Assign(obj);
}
- virtual void Finalize() {
+ void Finalize() override {
delete this;
}
// Scans all of the objects
- virtual void Run(Thread* self ATTRIBUTE_UNUSED)
+ void Run(Thread* self ATTRIBUTE_UNUSED) override
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScanObjectParallelVisitor visitor(this);
@@ -852,11 +852,11 @@
const uint8_t minimum_age_;
const bool clear_card_;
- virtual void Finalize() {
+ void Finalize() override {
delete this;
}
- virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
ScanObjectParallelVisitor visitor(this);
accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
size_t cards_scanned = clear_card_
@@ -1009,12 +1009,12 @@
const uintptr_t begin_;
const uintptr_t end_;
- virtual void Finalize() {
+ void Finalize() override {
delete this;
}
// Scans all of the objects
- virtual void Run(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
ScanObjectParallelVisitor visitor(this);
bitmap_->VisitMarkedRange(begin_, end_, visitor);
// Finish by emptying our local mark stack.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 791d037..af9000b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -108,7 +108,8 @@
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
- // Bytes allocated that takes bulk thread-local buffer allocations into account.
+ // Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
+ // non-TLAB object allocations.
size_t bytes_tl_bulk_allocated = 0u;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
@@ -156,10 +157,10 @@
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- size_t num_bytes_allocated_before =
- num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
- new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
+ size_t num_bytes_allocated_before =
+ num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
+ new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
TraceHeapSize(new_num_bytes_allocated);
@@ -212,6 +213,8 @@
// optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
// the allocator_type should be constant propagated.
if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
+      // `new_num_bytes_allocated` is zero if we didn't update num_bytes_allocated_.
+ // That's fine.
CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
@@ -394,7 +397,7 @@
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
size_t alloc_size,
bool grow) {
- size_t new_footprint = num_bytes_allocated_.load(std::memory_order_seq_cst) + alloc_size;
+ size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size;
if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
@@ -411,6 +414,8 @@
return false;
}
+// Request a GC if new_num_bytes_allocated is sufficiently large.
+// A call with new_num_bytes_allocated == 0 is a fast no-op.
inline void Heap::CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
ObjPtr<mirror::Object>* obj) {
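
The two heap-inl.h hunks above make the TLAB accounting conditional and let CheckConcurrentGC treat a zero delta as a fast no-op. A condensed sketch of that control flow (not the real Heap code; the threshold constant is purely illustrative) could look like this:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> num_bytes_allocated{0};
    constexpr size_t kConcurrentGcThreshold = 64 * 1024 * 1024;  // illustrative value

    void MaybeRequestConcurrentGcSketch(size_t new_num_bytes_allocated) {
      if (new_num_bytes_allocated == 0) {
        return;  // nothing changed globally; fast no-op
      }
      if (new_num_bytes_allocated >= kConcurrentGcThreshold) {
        // A concurrent GC request would be issued here.
      }
    }

    void OnAllocationSketch(size_t bytes_tl_bulk_allocated) {
      size_t new_num_bytes_allocated = 0;
      if (bytes_tl_bulk_allocated > 0) {
        // Only pay for the atomic RMW (and heap-size tracing) when a new
        // thread-local buffer was actually obtained.
        new_num_bytes_allocated =
            num_bytes_allocated.fetch_add(bytes_tl_bulk_allocated,
                                          std::memory_order_relaxed) +
            bytes_tl_bulk_allocated;
      }
      MaybeRequestConcurrentGcSketch(new_num_bytes_allocated);
    }
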
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 589e9a4..78e8422 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -51,6 +51,7 @@
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
#include "gc/collector/sticky_mark_sweep.h"
+#include "gc/racing_check.h"
#include "gc/reference_processor.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space.h"
@@ -335,9 +336,13 @@
// Requested begin for the alloc space, to follow the mapped image and oat files
uint8_t* request_begin = nullptr;
// Calculate the extra space required after the boot image, see allocations below.
- size_t heap_reservation_size = separate_non_moving_space
- ? non_moving_space_capacity
- : ((is_zygote && foreground_collector_type_ != kCollectorTypeCC) ? capacity_ : 0u);
+ size_t heap_reservation_size = 0u;
+ if (separate_non_moving_space) {
+ heap_reservation_size = non_moving_space_capacity;
+ } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
+ (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+ heap_reservation_size = capacity_;
+ }
heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
// Load image space(s).
std::vector<std::unique_ptr<space::ImageSpace>> boot_image_spaces;
@@ -414,13 +419,14 @@
// Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
ScopedTrace trace2("Create main mem map");
- if (separate_non_moving_space || !is_zygote) {
+ if (separate_non_moving_space ||
+ !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
main_mem_map_1 = MapAnonymousPreferredAddress(
kMemMapSpaceName[0], request_begin, capacity_, &error_str);
} else {
- // If no separate non-moving space and we are the zygote, the main space must come right
- // after the image space to avoid a gap. This is required since we want the zygote space to
- // be adjacent to the image space.
+ // If no separate non-moving space and we are the zygote or the collector type is GSS,
+ // the main space must come right after the image space to avoid a gap.
+ // This is required since we want the zygote space to be adjacent to the image space.
DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
main_mem_map_1 = MemMap::MapAnonymous(
kMemMapSpaceName[0],
@@ -450,6 +456,7 @@
// Non moving space is always dlmalloc since we currently don't have support for multiple
// active rosalloc spaces.
const size_t size = non_moving_space_mem_map.Size();
+ const void* non_moving_space_mem_map_begin = non_moving_space_mem_map.Begin();
non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(std::move(non_moving_space_mem_map),
"zygote / non moving space",
kDefaultStartingSize,
@@ -457,9 +464,9 @@
size,
size,
/* can_move_objects */ false);
- non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
- << non_moving_space_mem_map.Begin();
+ << non_moving_space_mem_map_begin;
+ non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
}
// Create other spaces based on whether or not we have a moving GC.
@@ -1198,8 +1205,8 @@
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
- uint64_t unique_count = unique_backtrace_count_.load(std::memory_order_relaxed);
- uint64_t seen_count = seen_backtrace_count_.load(std::memory_order_relaxed);
+ uint64_t unique_count = unique_backtrace_count_.load();
+ uint64_t seen_count = seen_backtrace_count_.load();
if (unique_count != 0 || seen_count != 0) {
LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count);
}
@@ -1581,10 +1588,10 @@
// Use signed comparison since freed bytes can be negative when background compaction foreground
// transitions occurs. This is caused by the moving objects from a bump pointer space to a
// free list backed space typically increasing memory footprint due to padding and binning.
- DCHECK_LE(freed_bytes,
- static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
+ RACING_DCHECK_LE(freed_bytes,
+ static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed)));
// Note: This relies on 2s complement for handling negative freed_bytes.
- num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes));
+ num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes), std::memory_order_relaxed);
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
thread_stats->freed_objects += freed_objects;
@@ -1601,10 +1608,10 @@
// ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
// If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
// all the way to zero exactly as the remainder will be subtracted at the next GC.
- size_t bytes_freed = num_bytes_freed_revoke_.load();
- CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed),
+ size_t bytes_freed = num_bytes_freed_revoke_.load(std::memory_order_relaxed);
+ CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed, std::memory_order_relaxed),
bytes_freed) << "num_bytes_freed_revoke_ underflow";
- CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed),
+ CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed, std::memory_order_relaxed),
bytes_freed) << "num_bytes_allocated_ underflow";
GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
}
@@ -2029,7 +2036,7 @@
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
- uint32_t before_allocated = num_bytes_allocated_.load();
+ uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -2165,7 +2172,7 @@
ScopedObjectAccess soa(self);
soa.Vm()->UnloadNativeLibraries();
}
- int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
+ int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
int32_t delta_allocated = before_allocated - after_allocated;
std::string saved_str;
if (delta_allocated >= 0) {
@@ -2279,13 +2286,13 @@
}
}
- virtual bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const {
+ bool ShouldSweepSpace(space::ContinuousSpace* space ATTRIBUTE_UNUSED) const override {
// Don't sweep any spaces since we probably blasted the internal accounting of the free list
// allocator.
return false;
}
- virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
+ mirror::Object* MarkNonForwardedObject(mirror::Object* obj) override
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
@@ -3798,7 +3805,7 @@
void Heap::IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke) {
size_t previous_num_bytes_freed_revoke =
- num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst);
+ num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_relaxed);
// Check the updated value is less than the number of bytes allocated. There is a risk of
// execution being suspended between the increment above and the CHECK below, leading to
// the use of previous_num_bytes_freed_revoke in the comparison.
@@ -4017,9 +4024,9 @@
StackHandleScope<1> hs(self);
auto h = hs.NewHandleWrapper(obj);
CollectGarbage(/* clear_soft_references */ false);
- unique_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
+ unique_backtrace_count_.fetch_add(1);
} else {
- seen_backtrace_count_.fetch_add(1, std::memory_order_seq_cst);
+ seen_backtrace_count_.fetch_add(1);
}
}
}
@@ -4200,7 +4207,7 @@
explicit TriggerPostForkCCGcTask(uint64_t target_time) : HeapTask(target_time) {}
void Run(Thread* self) override {
gc::Heap* heap = Runtime::Current()->GetHeap();
- // Trigger a GC, if not already done. The first GC after fork, whenever
+ // Trigger a GC, if not already done. The first GC after fork, whenever it
// takes place, will adjust the thresholds to normal levels.
if (heap->max_allowed_footprint_ == heap->growth_limit_) {
heap->RequestConcurrentGC(self, kGcCauseBackground, false);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 90bac20..6c4b936 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -477,8 +477,9 @@
void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);
// Returns the number of bytes currently allocated.
+ // The result should be treated as an approximation, if it is being concurrently updated.
size_t GetBytesAllocated() const {
- return num_bytes_allocated_.load(std::memory_order_seq_cst);
+ return num_bytes_allocated_.load(std::memory_order_relaxed);
}
// Returns the number of objects currently allocated.
@@ -506,7 +507,7 @@
// were specified. Android apps start with a growth limit (small heap size) which is
// cleared/extended for large apps.
size_t GetMaxMemory() const {
- // There is some race conditions in the allocation code that can cause bytes allocated to
+ // There are some race conditions in the allocation code that can cause bytes allocated to
// become larger than growth_limit_ in rare cases.
return std::max(GetBytesAllocated(), growth_limit_);
}
@@ -528,7 +529,7 @@
// Returns how much free memory we have until we need to grow the heap to perform an allocation.
// Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst);
+ size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
size_t total_memory = GetTotalMemory();
// Make sure we don't get a negative number.
return total_memory - std::min(total_memory, byte_allocated);
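
Because num_bytes_allocated_ is now read with relaxed ordering and can racily exceed the configured total for a moment, the clamping in GetFreeMemory above is what prevents an unsigned underflow. A trivial standalone restatement:

    #include <algorithm>
    #include <cstddef>

    size_t FreeMemorySketch(size_t total_memory, size_t bytes_allocated) {
      // If bytes_allocated transiently exceeds total_memory, return 0 instead of
      // wrapping around to a huge unsigned value.
      return total_memory - std::min(total_memory, bytes_allocated);
    }
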
@@ -1222,7 +1223,8 @@
// Since the heap was created, how many objects have been freed.
uint64_t total_objects_freed_ever_;
- // Number of bytes allocated. Adjusted after each allocation and free.
+ // Number of bytes currently allocated and not yet reclaimed. Includes active
+  // TLABs in their entirety, even if they have not yet been parceled out.
Atomic<size_t> num_bytes_allocated_;
// Number of registered native bytes allocated since the last time GC was
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 7cbad3b..05a04f2 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -96,7 +96,7 @@
}
class ZygoteHeapTest : public CommonRuntimeTest {
- void SetUpRuntimeOptions(RuntimeOptions* options) {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
options->push_back(std::make_pair("-Xzygote", nullptr));
}
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index 6caca84..3754129 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -83,7 +83,12 @@
}
TEST_F(VerificationTest, IsValidClassInHeap) {
- TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+ // Now that the String class is allocated in the non-moving space when the
+ // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the (presumably
+ // invalid) memory location `uint_klass - kObjectAlignment` tested below is
+  // poisoned when running with AddressSanitizer. Disable this test in that case.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedObjectAccess soa(Thread::Current());
VariableSizedHandleScope hs(soa.Self());
Handle<mirror::String> string(
@@ -106,7 +111,13 @@
}
TEST_F(VerificationTest, DumpValidObjectInfo) {
- TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+ // Now that the String class is allocated in the non-moving space when the
+ // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the calls to
+ // Verification::DumpObjectInfo below involving the String class object
+ // (`string->GetClass()`, `uint_klass`, etc.) access poisoned memory when they
+ // call Verification::DumpRAMAroundAddress. Disable this test in that case.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
@@ -126,7 +137,13 @@
}
TEST_F(VerificationTest, LogHeapCorruption) {
- TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
+ // Now that the String class is allocated in the non-moving space when the
+ // runtime is running without a boot image (which is the case in this gtest),
+  // and we run with AddressSanitizer, it is possible that the call to
+ // Verification::LogHeapCorruption below involving the String class object
+ // (`string->GetClass()`) accesses poisoned memory when it calls
+ // Verification::DumpRAMAroundAddress. Disable this test in that case.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
diff --git a/runtime/gc/racing_check.h b/runtime/gc/racing_check.h
new file mode 100644
index 0000000..a81a513
--- /dev/null
+++ b/runtime/gc/racing_check.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_RACING_CHECK_H_
+#define ART_RUNTIME_GC_RACING_CHECK_H_
+
+#include <unistd.h>
+#include <android-base/logging.h>
+
+// For checking purposes, we occasionally compare global counter values.
+// These counters are generally updated without ordering constraints, and hence
+// we may actually see inconsistent values when checking. To minimize spurious
+// failures, try twice with an intervening short sleep. This is a hack not used
+// in production builds.
+#define RACING_DCHECK_LE(x, y) \
+ if (::android::base::kEnableDChecks && ((x) > (y))) { usleep(1000); CHECK_LE(x, y); }
+
+#endif // ART_RUNTIME_GC_RACING_CHECK_H_
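
A usage sketch for the macro above (the counters and the function below are hypothetical; the only real call site added by this change is in runtime/gc/heap.cc above): when the compared counters are updated with relaxed ordering, the check re-reads once after a short sleep so a transient, racy violation does not abort a debug build.

    #include <unistd.h>

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    std::atomic<int64_t> bytes_allocated{1000};  // hypothetical counters
    std::atomic<int64_t> bytes_freed{0};

    // Same shape as RACING_DCHECK_LE, written out as a function for clarity.
    void RacingCheckLeSketch() {
      int64_t freed = bytes_freed.load(std::memory_order_relaxed);
      int64_t allocated = bytes_allocated.load(std::memory_order_relaxed);
      if (freed > allocated) {
        usleep(1000);  // give a racing writer time to finish its paired update
        assert(bytes_freed.load(std::memory_order_relaxed) <=
               bytes_allocated.load(std::memory_order_relaxed));
      }
    }
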
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index fe4124d..c212bad 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -276,7 +276,7 @@
explicit ClearedReferenceTask(jobject cleared_references)
: HeapTask(NanoTime()), cleared_references_(cleared_references) {
}
- virtual void Run(Thread* thread) {
+ void Run(Thread* thread) override {
ScopedObjectAccess soa(thread);
jvalue args[1];
args[0].l = cleared_references_;
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 4c58549..20f7a93 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -83,8 +83,8 @@
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
if (ret != nullptr) {
- objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
- bytes_allocated_.fetch_add(num_bytes, std::memory_order_seq_cst);
+ objects_allocated_.fetch_add(1, std::memory_order_relaxed);
+ bytes_allocated_.fetch_add(num_bytes, std::memory_order_relaxed);
}
return ret;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 42453f5..80af700 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -206,8 +206,8 @@
}
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
- objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_seq_cst);
- bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_seq_cst);
+ objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
+ bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
thread->SetTlab(nullptr, nullptr, nullptr);
}
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 02e84b5..59d4d27 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -155,8 +155,8 @@
// Record objects / bytes freed.
void RecordFree(int32_t objects, int32_t bytes) {
- objects_allocated_.fetch_sub(objects, std::memory_order_seq_cst);
- bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst);
+ objects_allocated_.fetch_sub(objects, std::memory_order_relaxed);
+ bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
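
The bump-pointer-space hunks above follow a pattern applied throughout this change: statistics counters keep their atomicity but drop the default sequentially consistent ordering. A minimal standalone sketch of that pattern, assuming the counters feed nothing but heuristics and logging:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> objects_allocated{0};
    std::atomic<size_t> bytes_allocated{0};

    void RecordAllocationSketch(size_t num_bytes) {
      // fetch_add stays atomic; memory_order_relaxed only gives up the global
      // ordering guarantees that these purely statistical counters never needed.
      objects_allocated.fetch_add(1, std::memory_order_relaxed);
      bytes_allocated.fetch_add(num_bytes, std::memory_order_relaxed);
    }

    void RecordFreeSketch(size_t num_objects, size_t num_bytes) {
      objects_allocated.fetch_sub(num_objects, std::memory_order_relaxed);
      bytes_allocated.fetch_sub(num_bytes, std::memory_order_relaxed);
    }
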
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index f308f63..1b3cb40 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -28,6 +28,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/file_utils.h"
@@ -38,13 +39,16 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/utils.h"
+#include "class_root.h"
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image-inl.h"
#include "image_space_fs.h"
+#include "intern_table-inl.h"
#include "mirror/class-inl.h"
+#include "mirror/executable.h"
#include "mirror/object-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "oat_file.h"
@@ -55,7 +59,6 @@
namespace gc {
namespace space {
-using android::base::StringAppendF;
using android::base::StringPrintf;
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
@@ -181,7 +184,7 @@
bool have_android_data = false;
*dalvik_cache_exists = false;
GetDalvikCache(GetInstructionSetString(image_isa),
- /* create_if_absent */ true,
+ /*create_if_absent=*/ true,
dalvik_cache,
&have_android_data,
dalvik_cache_exists,
@@ -239,142 +242,37 @@
return true;
}
-// Relocate the image at image_location to dest_filename and relocate it by a random amount.
-static bool RelocateImage(const char* image_location,
- const char* dest_directory,
- InstructionSet isa,
- std::string* error_msg) {
- // We should clean up so we are more likely to have room for the image.
- if (Runtime::Current()->IsZygote()) {
- LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
- PruneDalvikCache(isa);
- }
-
- std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
-
- std::string input_image_location_arg("--input-image-location=");
- input_image_location_arg += image_location;
-
- std::string output_image_directory_arg("--output-image-directory=");
- output_image_directory_arg += dest_directory;
-
- std::string instruction_set_arg("--instruction-set=");
- instruction_set_arg += GetInstructionSetString(isa);
-
- std::string base_offset_arg("--base-offset-delta=");
- StringAppendF(&base_offset_arg, "%d", ChooseRelocationOffsetDelta());
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
-
- argv.push_back(input_image_location_arg);
- argv.push_back(output_image_directory_arg);
-
- argv.push_back(instruction_set_arg);
- argv.push_back(base_offset_arg);
-
- std::string command_line(android::base::Join(argv, ' '));
- LOG(INFO) << "RelocateImage: " << command_line;
- return Exec(argv, error_msg);
-}
-
-static bool VerifyImage(const char* image_location,
- const char* dest_directory,
- InstructionSet isa,
- std::string* error_msg) {
- std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
-
- std::string input_image_location_arg("--input-image-location=");
- input_image_location_arg += image_location;
-
- std::string output_image_directory_arg("--output-image-directory=");
- output_image_directory_arg += dest_directory;
-
- std::string instruction_set_arg("--instruction-set=");
- instruction_set_arg += GetInstructionSetString(isa);
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
-
- argv.push_back(input_image_location_arg);
- argv.push_back(output_image_directory_arg);
-
- argv.push_back(instruction_set_arg);
-
- argv.push_back("--verify");
-
- std::string command_line(android::base::Join(argv, ' '));
- LOG(INFO) << "VerifyImage: " << command_line;
- return Exec(argv, error_msg);
-}
-
-static ImageHeader* ReadSpecificImageHeader(const char* filename, std::string* error_msg) {
+static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
+ std::string* error_msg) {
std::unique_ptr<ImageHeader> hdr(new ImageHeader);
if (!ReadSpecificImageHeader(filename, hdr.get())) {
*error_msg = StringPrintf("Unable to read image header for %s", filename);
return nullptr;
}
- return hdr.release();
+ return hdr;
}
-ImageHeader* ImageSpace::ReadImageHeader(const char* image_location,
- const InstructionSet image_isa,
- std::string* error_msg) {
+std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
+ const InstructionSet image_isa,
+ std::string* error_msg) {
std::string system_filename;
bool has_system = false;
std::string cache_filename;
bool has_cache = false;
bool dalvik_cache_exists = false;
bool is_global_cache = false;
- if (FindImageFilename(image_location, image_isa, &system_filename, &has_system,
- &cache_filename, &dalvik_cache_exists, &has_cache, &is_global_cache)) {
- if (Runtime::Current()->ShouldRelocate()) {
- if (has_system && has_cache) {
- std::unique_ptr<ImageHeader> sys_hdr(new ImageHeader);
- std::unique_ptr<ImageHeader> cache_hdr(new ImageHeader);
- if (!ReadSpecificImageHeader(system_filename.c_str(), sys_hdr.get())) {
- *error_msg = StringPrintf("Unable to read image header for %s at %s",
- image_location, system_filename.c_str());
- return nullptr;
- }
- if (!ReadSpecificImageHeader(cache_filename.c_str(), cache_hdr.get())) {
- *error_msg = StringPrintf("Unable to read image header for %s at %s",
- image_location, cache_filename.c_str());
- return nullptr;
- }
- if (sys_hdr->GetOatChecksum() != cache_hdr->GetOatChecksum()) {
- *error_msg = StringPrintf("Unable to find a relocated version of image file %s",
- image_location);
- return nullptr;
- }
- return cache_hdr.release();
- } else if (!has_cache) {
- *error_msg = StringPrintf("Unable to find a relocated version of image file %s",
- image_location);
- return nullptr;
- } else if (!has_system && has_cache) {
- // This can probably just use the cache one.
- return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
- }
- } else {
- // We don't want to relocate, Just pick the appropriate one if we have it and return.
- if (has_system && has_cache) {
- // We want the cache if the checksum matches, otherwise the system.
- std::unique_ptr<ImageHeader> system(ReadSpecificImageHeader(system_filename.c_str(),
- error_msg));
- std::unique_ptr<ImageHeader> cache(ReadSpecificImageHeader(cache_filename.c_str(),
- error_msg));
- if (system.get() == nullptr ||
- (cache.get() != nullptr && cache->GetOatChecksum() == system->GetOatChecksum())) {
- return cache.release();
- } else {
- return system.release();
- }
- } else if (has_system) {
- return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
- } else if (has_cache) {
- return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
- }
+ if (FindImageFilename(image_location,
+ image_isa,
+ &system_filename,
+ &has_system,
+ &cache_filename,
+ &dalvik_cache_exists,
+ &has_cache,
+ &is_global_cache)) {
+ if (has_system) {
+ return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
+ } else if (has_cache) {
+ return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
}
}
@@ -483,10 +381,66 @@
// nested class), but not declare functions in the header.
class ImageSpace::Loader {
public:
+ static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
+ const char* image_location,
+ bool validate_oat_file,
+ const OatFile* oat_file,
+ /*inout*/MemMap* image_reservation,
+ /*inout*/MemMap* oat_reservation,
+ /*out*/std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
+ std::unique_ptr<ImageSpace> space = Init(image_filename,
+ image_location,
+ validate_oat_file,
+ oat_file,
+ &logger,
+ image_reservation,
+ oat_reservation,
+ error_msg);
+ if (space != nullptr) {
+ TimingLogger::ScopedTiming timing("RelocateImage", &logger);
+ ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
+ if (!RelocateInPlace(*image_header,
+ space->GetMemMap()->Begin(),
+ space->GetLiveBitmap(),
+ oat_file,
+ error_msg)) {
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ CHECK_EQ(runtime->GetResolutionMethod(),
+ image_header->GetImageMethod(ImageHeader::kResolutionMethod));
+ CHECK_EQ(runtime->GetImtConflictMethod(),
+ image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
+ CHECK_EQ(runtime->GetImtUnimplementedMethod(),
+ image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
+ image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
+ image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
+ image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
+ CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
+ image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
+
+ VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
+ }
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG_STREAM(INFO));
+ }
+ return space;
+ }
+
static std::unique_ptr<ImageSpace> Init(const char* image_filename,
const char* image_location,
bool validate_oat_file,
const OatFile* oat_file,
+ TimingLogger* logger,
/*inout*/MemMap* image_reservation,
/*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
@@ -494,12 +448,11 @@
CHECK(image_filename != nullptr);
CHECK(image_location != nullptr);
- TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image));
VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
std::unique_ptr<File> file;
{
- TimingLogger::ScopedTiming timing("OpenImageFile", &logger);
+ TimingLogger::ScopedTiming timing("OpenImageFile", logger);
file.reset(OS::OpenFileForReading(image_filename));
if (file == nullptr) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
@@ -509,7 +462,7 @@
ImageHeader temp_image_header;
ImageHeader* image_header = &temp_image_header;
{
- TimingLogger::ScopedTiming timing("ReadImageHeader", &logger);
+ TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
bool success = file->ReadFully(image_header, sizeof(*image_header));
if (!success || !image_header->IsValid()) {
*error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
@@ -586,24 +539,10 @@
image_filename,
image_location,
*image_header,
- image_header->GetImageBegin(),
file->Fd(),
logger,
image_reservation,
- (image_reservation == nullptr && image_header->IsPic()) ? nullptr : error_msg);
- // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
- // relocate in-place.
- if (!map.IsValid() && image_reservation == nullptr && image_header->IsPic()) {
- map = LoadImageFile(image_filename,
- image_location,
- *image_header,
- /* address */ nullptr,
- file->Fd(),
- logger,
- /* image_reservation */ nullptr,
- error_msg);
- }
- // Were we able to load something and continue?
+ error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());
return nullptr;
@@ -611,10 +550,11 @@
DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
- PROT_READ, MAP_PRIVATE,
+ PROT_READ,
+ MAP_PRIVATE,
file->Fd(),
image_bitmap_offset,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
image_filename,
error_msg);
if (!image_bitmap_map.IsValid()) {
@@ -624,7 +564,7 @@
// Loaded the map, use the image header from the file now in case we patch it with
// RelocateInPlace.
image_header = reinterpret_cast<ImageHeader*>(map.Begin());
- const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst);
+ const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
image_filename,
bitmap_index));
@@ -634,7 +574,7 @@
uint8_t* const image_end = map.Begin() + image_objects.End();
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
{
- TimingLogger::ScopedTiming timing("CreateImageBitmap", &logger);
+ TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
bitmap.reset(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(
bitmap_name,
@@ -647,16 +587,6 @@
return nullptr;
}
}
- {
- TimingLogger::ScopedTiming timing("RelocateImage", &logger);
- if (!RelocateInPlace(*image_header,
- map.Begin(),
- bitmap.get(),
- oat_file,
- error_msg)) {
- return nullptr;
- }
- }
// We only want the mirror object, not the ArtFields and ArtMethods.
std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
image_location,
@@ -670,7 +600,7 @@
// Object::SizeOf() which VerifyImageAllocations() calls, are not
// set yet at this point.
if (oat_file == nullptr) {
- TimingLogger::ScopedTiming timing("OpenOatFile", &logger);
+ TimingLogger::ScopedTiming timing("OpenOatFile", logger);
space->oat_file_ = OpenOatFile(*space, image_filename, oat_reservation, error_msg);
if (space->oat_file_ == nullptr) {
DCHECK(!error_msg->empty());
@@ -682,7 +612,7 @@
}
if (validate_oat_file) {
- TimingLogger::ScopedTiming timing("ValidateOatFile", &logger);
+ TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
CHECK(space->oat_file_ != nullptr);
if (!ImageSpace::ValidateOatFile(*space->oat_file_, error_msg)) {
DCHECK(!error_msg->empty());
@@ -690,60 +620,6 @@
}
}
- Runtime* runtime = Runtime::Current();
-
- // If oat_file is null, then it is the boot image space. Use oat_file_non_owned_ from the space
- // to set the runtime methods.
- CHECK_EQ(oat_file != nullptr, image_header->IsAppImage());
- if (image_header->IsAppImage()) {
- CHECK_EQ(runtime->GetResolutionMethod(),
- image_header->GetImageMethod(ImageHeader::kResolutionMethod));
- CHECK_EQ(runtime->GetImtConflictMethod(),
- image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
- CHECK_EQ(runtime->GetImtUnimplementedMethod(),
- image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
- image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
- image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
- image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
- CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
- } else if (!runtime->HasResolutionMethod()) {
- runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
- runtime->SetResolutionMethod(image_header->GetImageMethod(ImageHeader::kResolutionMethod));
- runtime->SetImtConflictMethod(image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
- runtime->SetImtUnimplementedMethod(
- image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
- CalleeSaveType::kSaveAllCalleeSaves);
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod),
- CalleeSaveType::kSaveRefsOnly);
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
- CalleeSaveType::kSaveRefsAndArgs);
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod),
- CalleeSaveType::kSaveEverything);
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit),
- CalleeSaveType::kSaveEverythingForClinit);
- runtime->SetCalleeSaveMethod(
- image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck),
- CalleeSaveType::kSaveEverythingForSuspendCheck);
- }
-
- VLOG(image) << "ImageSpace::Init exiting " << *space.get();
- if (VLOG_IS_ON(image)) {
- logger.Dump(LOG_STREAM(INFO));
- }
return space;
}
@@ -751,12 +627,12 @@
static MemMap LoadImageFile(const char* image_filename,
const char* image_location,
const ImageHeader& image_header,
- uint8_t* address,
int fd,
- TimingLogger& logger,
+ TimingLogger* logger,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg) {
- TimingLogger::ScopedTiming timing("MapImageFile", &logger);
+ TimingLogger::ScopedTiming timing("MapImageFile", logger);
+ uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
if (storage_mode == ImageHeader::kStorageModeUncompressed) {
return MemMap::MapFileAtAddress(address,
@@ -764,10 +640,10 @@
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
fd,
- /* start */ 0,
- /* low_4gb */ true,
+ /*start=*/ 0,
+ /*low_4gb=*/ true,
image_filename,
- /* reuse */ false,
+ /*reuse=*/ false,
image_reservation,
error_msg);
}
@@ -786,8 +662,8 @@
address,
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
- /* low_4gb */ true,
- /* reuse */ false,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
image_reservation,
error_msg);
if (map.IsValid()) {
@@ -797,8 +673,8 @@
PROT_READ,
MAP_PRIVATE,
fd,
- /* offset */ 0,
- /* low_4gb */ false,
+ /*start=*/ 0,
+ /*low_4gb=*/ false,
image_filename,
error_msg);
if (!temp_map.IsValid()) {
@@ -808,7 +684,7 @@
memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
const uint64_t start = NanoTime();
// LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
- TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
+ TimingLogger::ScopedTiming timing2("LZ4 decompress image", logger);
const size_t decompressed_size = LZ4_decompress_safe(
reinterpret_cast<char*>(temp_map.Begin()) + sizeof(ImageHeader),
reinterpret_cast<char*>(map.Begin()) + decompress_offset,
@@ -1008,7 +884,7 @@
}
if (obj->IsClass()) {
- mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Class* klass = obj->AsClass<kVerifyNone>();
// Fixup super class before visiting instance fields which require
// information from their super class to calculate offsets.
mirror::Class* super_class = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
@@ -1026,12 +902,10 @@
*this);
// Note that this code relies on no circular dependencies.
// We want to use our own class loader and not the one in the image.
- if (obj->IsClass<kVerifyNone, kWithoutReadBarrier>()) {
- mirror::Class* as_klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
+ if (obj->IsClass<kVerifyNone>()) {
+ mirror::Class* as_klass = obj->AsClass<kVerifyNone>();
FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
- as_klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(as_klass,
- pointer_size_,
- visitor);
+ as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor);
// Deal with the pointer arrays. Use the helper function since multiple classes can reference
// the same arrays.
mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
@@ -1099,7 +973,7 @@
fixup_heap_objects_(fixup_heap_objects),
pointer_size_(pointer_size) {}
- virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS {
+ void Visit(ArtMethod* method) override NO_THREAD_SAFETY_ANALYSIS {
// TODO: Separate visitor for runtime vs normal methods.
if (UNLIKELY(method->IsRuntimeMethod())) {
ImtConflictTable* table = method->GetImtConflictTable(pointer_size_);
@@ -1132,29 +1006,20 @@
template<typename... Args>
explicit FixupArtFieldVisitor(Args... args) : FixupVisitor(args...) {}
- virtual void Visit(ArtField* field) NO_THREAD_SAFETY_ANALYSIS {
+ void Visit(ArtField* field) override NO_THREAD_SAFETY_ANALYSIS {
field->UpdateObjects(ForwardObjectAdapter(this));
}
};
// Relocate an image space mapped at target_base which possibly used to be at a different base
- // address. Only needs a single image space, not one for both source and destination.
- // In place means modifying a single ImageSpace in place rather than relocating from one ImageSpace
- // to another.
+ // address. In place means modifying a single ImageSpace in place rather than relocating from
+ // one ImageSpace to another.
static bool RelocateInPlace(ImageHeader& image_header,
uint8_t* target_base,
accounting::ContinuousSpaceBitmap* bitmap,
const OatFile* app_oat_file,
std::string* error_msg) {
DCHECK(error_msg != nullptr);
- if (!image_header.IsPic()) {
- if (image_header.GetImageBegin() == target_base) {
- return true;
- }
- *error_msg = StringPrintf("Cannot relocate non-pic image for oat file %s",
- (app_oat_file != nullptr) ? app_oat_file->GetLocation().c_str() : "");
- return false;
- }
// Set up sections.
uint32_t boot_image_begin = 0;
uint32_t boot_image_end = 0;
@@ -1247,7 +1112,7 @@
CHECK_EQ(image_header.GetImageBegin(), target_base);
// Fix up dex cache DexFile pointers.
auto* dex_caches = image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)->
- AsObjectArray<mirror::DexCache, kVerifyNone, kWithoutReadBarrier>();
+ AsObjectArray<mirror::DexCache, kVerifyNone>();
for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
// Fix up dex cache pointers.
@@ -1358,6 +1223,24 @@
FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
temp_table.VisitRoots(root_visitor);
}
+ // Fix up the intern table.
+ const auto& intern_table_section = image_header.GetInternedStringsSection();
+ if (intern_table_section.Size() > 0u) {
+ TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
+ ScopedObjectAccess soa(Thread::Current());
+ // Fixup the pointers in the newly written intern table to contain image addresses.
+ InternTable temp_intern_table;
+ // Note that we require that ReadFromMemory does not make an internal copy of the elements
+ // so that the VisitRoots() will update the memory directly rather than the copies.
+ FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
+ temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
+ [&](InternTable::UnorderedSet& strings)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (GcRoot<mirror::String>& root : strings) {
+ root = GcRoot<mirror::String>(fixup_adapter(root.Read<kWithoutReadBarrier>()));
+ }
+ });
+ }
}
if (VLOG_IS_ON(image)) {
logger.Dump(LOG_STREAM(INFO));
@@ -1374,13 +1257,16 @@
CHECK(image_header.GetOatDataBegin() != nullptr);
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ uint8_t* oat_data_begin = image_header.GetOatDataBegin();
+ if (oat_reservation != nullptr) {
+ oat_data_begin += oat_reservation->Begin() - image_header.GetOatFileBegin();
+ }
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- image_header.GetOatDataBegin(),
!Runtime::Current()->IsAotCompiler(),
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
oat_reservation,
error_msg));
if (oat_file == nullptr) {
@@ -1390,6 +1276,7 @@
error_msg->c_str());
return nullptr;
}
+ CHECK(oat_data_begin == oat_file->Begin());
uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
uint32_t image_oat_checksum = image_header.GetOatChecksum();
if (oat_checksum != image_oat_checksum) {
@@ -1400,17 +1287,6 @@
image.GetName());
return nullptr;
}
- int32_t image_patch_delta = image_header.GetPatchDelta();
- int32_t oat_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
- if (oat_patch_delta != image_patch_delta && !image_header.CompilePic()) {
- // We should have already relocated by this point. Bail out.
- *error_msg = StringPrintf("Failed to match oat file patch delta %d to expected patch delta %d "
- "in image %s",
- oat_patch_delta,
- image_patch_delta,
- image.GetName());
- return nullptr;
- }
return oat_file;
}
@@ -1465,6 +1341,7 @@
/*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, filename, &locations, error_msg)) {
@@ -1503,7 +1380,8 @@
filename = GetSystemImageFilename(location.c_str(), image_isa_);
spaces.push_back(Load(location,
filename,
- /* validate_oat_file */ false,
+ /*validate_oat_file=*/ false,
+ &logger,
&image_reservation,
&oat_reservation,
error_msg));
@@ -1515,18 +1393,25 @@
return false;
}
+ MaybeRelocateSpaces(spaces, &logger);
+ InitRuntimeMethods(spaces);
*extra_reservation = std::move(local_extra_reservation);
+ VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
boot_image_spaces->swap(spaces);
+
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG_STREAM(INFO));
+ }
return true;
}
bool LoadFromDalvikCache(
- bool validate_system_checksums,
bool validate_oat_file,
size_t extra_reservation_size,
/*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
DCHECK(DalvikCacheExists());
std::vector<std::string> locations;
if (!GetBootClassPathImageLocations(image_location_, cache_filename_, &locations, error_msg)) {
@@ -1575,42 +1460,549 @@
spaces.push_back(Load(location,
filename,
validate_oat_file,
+ &logger,
&image_reservation,
&oat_reservation,
error_msg));
if (spaces.back() == nullptr) {
return false;
}
- if (validate_system_checksums) {
- ImageHeader system_hdr;
- std::string system_filename = GetSystemImageFilename(location.c_str(), image_isa_);
- if (!ReadSpecificImageHeader(system_filename.c_str(), &system_hdr)) {
- *error_msg = StringPrintf("Cannot read header of %s", system_filename.c_str());
- return false;
- }
- if (spaces.back()->GetImageHeader().GetOatChecksum() != system_hdr.GetOatChecksum()) {
- *error_msg = StringPrintf("Checksum mismatch: %u(%s) vs %u(%s)",
- spaces.back()->GetImageHeader().GetOatChecksum(),
- filename.c_str(),
- system_hdr.GetOatChecksum(),
- system_filename.c_str());
- return false;
- }
- }
}
if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
return false;
}
+ MaybeRelocateSpaces(spaces, &logger);
+ InitRuntimeMethods(spaces);
*extra_reservation = std::move(local_extra_reservation);
boot_image_spaces->swap(spaces);
+
+ VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
+ if (VLOG_IS_ON(image)) {
+ logger.Dump(LOG_STREAM(INFO));
+ }
return true;
}
private:
+ template <typename T>
+ ALWAYS_INLINE static T* RelocatedAddress(T* src, uint32_t diff) {
+ DCHECK(src != nullptr);
+ return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff);
+ }
+
+ template <bool kMayBeNull = true, typename T>
+ ALWAYS_INLINE static void PatchGcRoot(uint32_t diff, /*inout*/GcRoot<T>* root)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(sizeof(GcRoot<mirror::Class*>) == sizeof(uint32_t), "GcRoot size check");
+ T* old_value = root->template Read<kWithoutReadBarrier>();
+ DCHECK(kMayBeNull || old_value != nullptr);
+ if (!kMayBeNull || old_value != nullptr) {
+ *root = GcRoot<T>(RelocatedAddress(old_value, diff));
+ }
+ }
+
+ template <PointerSize kPointerSize, bool kMayBeNull = true, typename T>
+ ALWAYS_INLINE static void PatchNativePointer(uint32_t diff, /*inout*/T** entry) {
+ if (kPointerSize == PointerSize::k64) {
+ uint64_t* raw_entry = reinterpret_cast<uint64_t*>(entry);
+ T* old_value = reinterpret_cast64<T*>(*raw_entry);
+ DCHECK(kMayBeNull || old_value != nullptr);
+ if (!kMayBeNull || old_value != nullptr) {
+ T* new_value = RelocatedAddress(old_value, diff);
+ *raw_entry = reinterpret_cast64<uint64_t>(new_value);
+ }
+ } else {
+ uint32_t* raw_entry = reinterpret_cast<uint32_t*>(entry);
+ T* old_value = reinterpret_cast32<T*>(*raw_entry);
+ DCHECK(kMayBeNull || old_value != nullptr);
+ if (!kMayBeNull || old_value != nullptr) {
+ T* new_value = RelocatedAddress(old_value, diff);
+ *raw_entry = reinterpret_cast32<uint32_t>(new_value);
+ }
+ }
+ }
+
+ class PatchedObjectsMap {
+ public:
+ PatchedObjectsMap(uint8_t* image_space_begin, size_t size)
+ : image_space_begin_(image_space_begin),
+ data_(new uint8_t[BitsToBytesRoundUp(NumLocations(size))]),
+ visited_objects_(data_.get(), /*bit_start=*/ 0u, NumLocations(size)) {
+ DCHECK_ALIGNED(image_space_begin_, kObjectAlignment);
+ std::memset(data_.get(), 0, BitsToBytesRoundUp(NumLocations(size)));
+ }
+
+ ALWAYS_INLINE bool IsVisited(mirror::Object* object) const {
+ return visited_objects_.LoadBit(GetIndex(object));
+ }
+
+ ALWAYS_INLINE void MarkVisited(mirror::Object* object) {
+ DCHECK(!IsVisited(object));
+ visited_objects_.StoreBit(GetIndex(object), /*value=*/ true);
+ }
+
+ private:
+ static size_t NumLocations(size_t size) {
+ DCHECK_ALIGNED(size, kObjectAlignment);
+ return size / kObjectAlignment;
+ }
+
+ size_t GetIndex(mirror::Object* object) const {
+ DCHECK_ALIGNED(object, kObjectAlignment);
+ return (reinterpret_cast<uint8_t*>(object) - image_space_begin_) / kObjectAlignment;
+ }
+
+ uint8_t* const image_space_begin_;
+ const std::unique_ptr<uint8_t[]> data_;
+ BitMemoryRegion visited_objects_;
+ };
+
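PatchedObjectsMap above amounts to a bitmap with one bit per kObjectAlignment-sized slot of the combined image range: an object's bit index is its offset from the image base divided by the alignment. A minimal standalone sketch of that bookkeeping, using std::vector<bool> in place of BitMemoryRegion and an assumed 8-byte alignment (illustrative only, not ART code):

#include <cstddef>
#include <cstdint>
#include <vector>

// One "visited" bit per 8-byte slot of a contiguous memory range.
class VisitedMap {
 public:
  VisitedMap(const uint8_t* begin, size_t size)
      : begin_(begin), bits_(size / kAlignment, false) {}

  bool IsVisited(const void* obj) const { return bits_[Index(obj)]; }
  void MarkVisited(const void* obj) { bits_[Index(obj)] = true; }

 private:
  static constexpr size_t kAlignment = 8;  // stand-in for kObjectAlignment

  size_t Index(const void* obj) const {
    // Objects are alignment-granular, so offset / alignment is a dense index.
    return static_cast<size_t>(static_cast<const uint8_t*>(obj) - begin_) / kAlignment;
  }

  const uint8_t* begin_;
  std::vector<bool> bits_;
};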
+ class PatchArtFieldVisitor final : public ArtFieldVisitor {
+ public:
+ explicit PatchArtFieldVisitor(uint32_t diff)
+ : diff_(diff) {}
+
+ void Visit(ArtField* field) override REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchGcRoot</*kMayBeNull=*/ false>(diff_, &field->DeclaringClassRoot());
+ }
+
+ private:
+ const uint32_t diff_;
+ };
+
+ template <PointerSize kPointerSize>
+ class PatchArtMethodVisitor final : public ArtMethodVisitor {
+ public:
+ explicit PatchArtMethodVisitor(uint32_t diff)
+ : diff_(diff) {}
+
+ void Visit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchGcRoot(diff_, &method->DeclaringClassRoot());
+ void** data_address = PointerAddress(method, ArtMethod::DataOffset(kPointerSize));
+ PatchNativePointer<kPointerSize>(diff_, data_address);
+ void** entrypoint_address =
+ PointerAddress(method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
+ PatchNativePointer<kPointerSize>(diff_, entrypoint_address);
+ }
+
+ private:
+ void** PointerAddress(ArtMethod* method, MemberOffset offset) {
+ return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(method) + offset.Uint32Value());
+ }
+
+ const uint32_t diff_;
+ };
+
+ class ClassTableVisitor final {
+ public:
+ explicit ClassTableVisitor(uint32_t diff) : diff_(diff) {}
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(root->AsMirrorPtr() != nullptr);
+ root->Assign(RelocatedAddress(root->AsMirrorPtr(), diff_));
+ }
+
+ private:
+ const uint32_t diff_;
+ };
+
+ template <PointerSize kPointerSize>
+ class PatchObjectVisitor final {
+ public:
+ explicit PatchObjectVisitor(uint32_t diff)
+ : diff_(diff) {}
+
+ void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // A mirror::Class object consists of
+ // - instance fields inherited from j.l.Object,
+ // - instance fields inherited from j.l.Class,
+ // - embedded tables (vtable, interface method table),
+ // - static fields of the class itself.
+      // The reference fields are at the start of each field section (this is how the
+      // ClassLinker orders fields; the exception is that an alignment gap between the
+      // superclass fields and the first reference field of the subclass may be filled
+      // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
+
+ DCHECK_ALIGNED(klass, kObjectAlignment);
+ static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
+ // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
+ // This should be the only reference field in j.l.Object and we assert that below.
+ PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
+ // Then patch the reference instance fields described by j.l.Class.class.
+ // Use the sizeof(Object) to determine where these reference fields start;
+ // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
+ // after patching but the j.l.Class may not have been patched yet.
+ mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
+ DCHECK_NE(num_reference_instance_fields, 0u);
+ static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
+ MemberOffset instance_field_offset(sizeof(mirror::Object));
+ for (size_t i = 0; i != num_reference_instance_fields; ++i) {
+ PatchReferenceField(klass, instance_field_offset);
+ static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+ "Heap reference sizes equality check.");
+ instance_field_offset =
+ MemberOffset(instance_field_offset.Uint32Value() + kHeapReferenceSize);
+ }
+ // Now that we have patched the `super_class_`, if this is the j.l.Class.class,
+ // we can get a reference to j.l.Object.class and assert that it has only one
+ // reference instance field (the `klass_` patched above).
+ if (kIsDebugBuild && klass == class_class) {
+ mirror::Class* object_class = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
+ CHECK_EQ(object_class->NumReferenceInstanceFields<kVerifyNone>(), 1u);
+ }
+ // Then patch static fields.
+ size_t num_reference_static_fields = klass->NumReferenceStaticFields<kVerifyNone>();
+ if (num_reference_static_fields != 0u) {
+ MemberOffset static_field_offset =
+ klass->GetFirstReferenceStaticFieldOffset<kVerifyNone>(kPointerSize);
+ for (size_t i = 0; i != num_reference_static_fields; ++i) {
+ PatchReferenceField(klass, static_field_offset);
+ static_assert(sizeof(mirror::HeapReference<mirror::Object>) == kHeapReferenceSize,
+ "Heap reference sizes equality check.");
+ static_field_offset =
+ MemberOffset(static_field_offset.Uint32Value() + kHeapReferenceSize);
+ }
+ }
+ // Then patch native pointers.
+ klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
+ }
+
+ template <typename T>
+ T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ptr != nullptr) {
+ ptr = RelocatedAddress(ptr, diff_);
+ }
+ return ptr;
+ }
+
+ void VisitPointerArray(mirror::PointerArray* pointer_array)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Fully patch the pointer array, including the `klass_` field.
+ PatchReferenceField</*kMayBeNull=*/ false>(pointer_array, mirror::Object::ClassOffset());
+
+ int32_t length = pointer_array->GetLength<kVerifyNone>();
+ for (int32_t i = 0; i != length; ++i) {
+ ArtMethod** method_entry = reinterpret_cast<ArtMethod**>(
+ pointer_array->ElementAddress<kVerifyNone>(i, kPointerSize));
+ PatchNativePointer<kPointerSize, /*kMayBeNull=*/ false>(diff_, method_entry);
+ }
+ }
+
+ void VisitObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Visit all reference fields.
+ object->VisitReferences</*kVisitNativeRoots=*/ false,
+ kVerifyNone,
+ kWithoutReadBarrier>(*this, *this);
+ // This function should not be called for classes.
+ DCHECK(!object->IsClass<kVerifyNone>());
+ }
+
+ // Visitor for VisitReferences().
+ ALWAYS_INLINE void operator()(mirror::Object* object, MemberOffset field_offset, bool is_static)
+ const REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!is_static);
+ PatchReferenceField(object, field_offset);
+ }
+ // Visitor for VisitReferences(), java.lang.ref.Reference case.
+ ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, mirror::Reference* ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(klass->IsTypeOfReferenceClass());
+ this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
+ }
+ // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+ const {}
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+ void VisitDexCacheArrays(mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_) {
+ FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
+ mirror::DexCache::StringsOffset(),
+ dex_cache->NumStrings<kVerifyNone>());
+ FixupDexCacheArray<mirror::TypeDexCacheType>(dex_cache,
+ mirror::DexCache::ResolvedTypesOffset(),
+ dex_cache->NumResolvedTypes<kVerifyNone>());
+ FixupDexCacheArray<mirror::MethodDexCacheType>(dex_cache,
+ mirror::DexCache::ResolvedMethodsOffset(),
+ dex_cache->NumResolvedMethods<kVerifyNone>());
+ FixupDexCacheArray<mirror::FieldDexCacheType>(dex_cache,
+ mirror::DexCache::ResolvedFieldsOffset(),
+ dex_cache->NumResolvedFields<kVerifyNone>());
+ FixupDexCacheArray<mirror::MethodTypeDexCacheType>(
+ dex_cache,
+ mirror::DexCache::ResolvedMethodTypesOffset(),
+ dex_cache->NumResolvedMethodTypes<kVerifyNone>());
+ FixupDexCacheArray<GcRoot<mirror::CallSite>>(
+ dex_cache,
+ mirror::DexCache::ResolvedCallSitesOffset(),
+ dex_cache->NumResolvedCallSites<kVerifyNone>());
+ }
+
+ private:
+ template <bool kMayBeNull = true>
+ ALWAYS_INLINE void PatchReferenceField(mirror::Object* object, MemberOffset offset) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* old_value =
+ object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+ DCHECK(kMayBeNull || old_value != nullptr);
+ if (!kMayBeNull || old_value != nullptr) {
+ mirror::Object* new_value = RelocatedAddress(old_value, diff_);
+ object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+ /*kCheckTransaction=*/ true,
+ kVerifyNone>(offset, new_value);
+ }
+ }
+
+ template <typename T>
+ void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* array, uint32_t index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
+ "Size check for removing std::atomic<>.");
+ PatchGcRoot(diff_, &(reinterpret_cast<mirror::DexCachePair<T>*>(array)[index].object));
+ }
+
+ template <typename T>
+ void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* array, uint32_t index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(sizeof(std::atomic<mirror::NativeDexCachePair<T>>) ==
+ sizeof(mirror::NativeDexCachePair<T>),
+ "Size check for removing std::atomic<>.");
+ mirror::NativeDexCachePair<T> pair =
+ mirror::DexCache::GetNativePairPtrSize(array, index, kPointerSize);
+ if (pair.object != nullptr) {
+ pair.object = RelocatedAddress(pair.object, diff_);
+ mirror::DexCache::SetNativePairPtrSize(array, index, pair, kPointerSize);
+ }
+ }
+
+ void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* array, uint32_t index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchGcRoot(diff_, &array[index]);
+ }
+
+ template <typename EntryType>
+ void FixupDexCacheArray(mirror::DexCache* dex_cache,
+ MemberOffset array_offset,
+ uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) {
+ EntryType* old_array =
+ reinterpret_cast64<EntryType*>(dex_cache->GetField64<kVerifyNone>(array_offset));
+ DCHECK_EQ(old_array != nullptr, size != 0u);
+ if (old_array != nullptr) {
+ EntryType* new_array = RelocatedAddress(old_array, diff_);
+ dex_cache->SetField64<kVerifyNone>(array_offset, reinterpret_cast64<uint64_t>(new_array));
+ for (uint32_t i = 0; i != size; ++i) {
+ FixupDexCacheArrayEntry(new_array, i);
+ }
+ }
+ }
+
+ const uint32_t diff_;
+ };
+
+ template <PointerSize kPointerSize>
+ static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
+ uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
+ PatchedObjectsMap patched_objects(spaces.front()->Begin(),
+ spaces.back()->End() - spaces.front()->Begin());
+ PatchObjectVisitor<kPointerSize> patch_object_visitor(diff);
+
+ mirror::Class* dcheck_class_class = nullptr; // Used only for a DCHECK().
+ for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
+ const ImageSpace* space = spaces[s].get();
+
+ // First patch the image header. The `diff` is OK for patching 32-bit fields but
+ // the 64-bit method fields in the ImageHeader may need a negative `delta`.
+ reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImage(
+ (reinterpret_cast32<uint32_t>(space->Begin()) < diff)
+ ? -static_cast<int64_t>(-diff) : static_cast<int64_t>(diff));
+
+ // Patch fields and methods.
+ const ImageHeader& image_header = space->GetImageHeader();
+ PatchArtFieldVisitor field_visitor(diff);
+ image_header.VisitPackedArtFields(&field_visitor, space->Begin());
+ PatchArtMethodVisitor<kPointerSize> method_visitor(diff);
+ image_header.VisitPackedArtMethods(&method_visitor, space->Begin(), kPointerSize);
+ auto method_table_visitor = [diff](ArtMethod* method) {
+ DCHECK(method != nullptr);
+ return RelocatedAddress(method, diff);
+ };
+ image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
+ image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
+
+ // Patch the intern table.
+ if (image_header.GetInternedStringsSection().Size() != 0u) {
+ const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
+ size_t read_count;
+ InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+ for (GcRoot<mirror::String>& slot : temp_set) {
+ PatchGcRoot</*kMayBeNull=*/ false>(diff, &slot);
+ }
+ }
+
+ // Patch the class table and classes, so that we can traverse class hierarchy to
+ // determine the types of other objects when we visit them later.
+ if (image_header.GetClassTableSection().Size() != 0u) {
+ uint8_t* data = space->Begin() + image_header.GetClassTableSection().Offset();
+ size_t read_count;
+ ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+ DCHECK(!temp_set.empty());
+ ClassTableVisitor class_table_visitor(diff);
+ for (ClassTable::TableSlot& slot : temp_set) {
+ slot.VisitRoot(class_table_visitor);
+ mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+ DCHECK(klass != nullptr);
+ patched_objects.MarkVisited(klass);
+ patch_object_visitor.VisitClass(klass);
+ if (kIsDebugBuild) {
+ mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (dcheck_class_class == nullptr) {
+ dcheck_class_class = class_class;
+ } else {
+ CHECK_EQ(class_class, dcheck_class_class);
+ }
+ }
+ // Then patch the non-embedded vtable and iftable.
+ mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr && !patched_objects.IsVisited(vtable)) {
+ patched_objects.MarkVisited(vtable);
+ patch_object_visitor.VisitPointerArray(vtable);
+ }
+ auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ if (iftable != nullptr) {
+ int32_t ifcount = klass->GetIfTableCount<kVerifyNone, kWithoutReadBarrier>();
+ for (int32_t i = 0; i != ifcount; ++i) {
+ mirror::PointerArray* unpatched_ifarray =
+ iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+ if (unpatched_ifarray != nullptr) {
+ // The iftable has not been patched, so we need to explicitly adjust the pointer.
+ mirror::PointerArray* ifarray = RelocatedAddress(unpatched_ifarray, diff);
+ if (!patched_objects.IsVisited(ifarray)) {
+ patched_objects.MarkVisited(ifarray);
+ patch_object_visitor.VisitPointerArray(ifarray);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Patch class roots now, so that we can recognize mirror::Method and mirror::Constructor.
+ ObjPtr<mirror::Class> method_class;
+ ObjPtr<mirror::Class> constructor_class;
+ {
+ const ImageSpace* space = spaces.front().get();
+ const ImageHeader& image_header = space->GetImageHeader();
+
+ ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
+ image_header.GetImageRoots<kWithoutReadBarrier>();
+ patched_objects.MarkVisited(image_roots.Ptr());
+ patch_object_visitor.VisitObject(image_roots.Ptr());
+
+ ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
+ ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(MakeObjPtr(
+ image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots)));
+ patched_objects.MarkVisited(class_roots.Ptr());
+ patch_object_visitor.VisitObject(class_roots.Ptr());
+
+ method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
+ constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
+ }
+
+ for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
+ const ImageSpace* space = spaces[s].get();
+ const ImageHeader& image_header = space->GetImageHeader();
+
+ static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
+ uint32_t objects_end = image_header.GetObjectsSection().Size();
+ DCHECK_ALIGNED(objects_end, kObjectAlignment);
+ for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
+ mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
+ if (!patched_objects.IsVisited(object)) {
+ // This is the last pass over objects, so we do not need to MarkVisited().
+ patch_object_visitor.VisitObject(object);
+ mirror::Class* klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (klass->IsDexCacheClass<kVerifyNone>()) {
+ // Patch dex cache array pointers and elements.
+ mirror::DexCache* dex_cache = object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
+ patch_object_visitor.VisitDexCacheArrays(dex_cache);
+ } else if (klass == method_class || klass == constructor_class) {
+ // Patch the ArtMethod* in the mirror::Executable subobject.
+ ObjPtr<mirror::Executable> as_executable =
+ ObjPtr<mirror::Executable>::DownCast(MakeObjPtr(object));
+ ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
+ ArtMethod* patched_method = RelocatedAddress(unpatched_method, diff);
+ as_executable->SetArtMethod</*kTransactionActive=*/ false,
+ /*kCheckTransaction=*/ true,
+ kVerifyNone>(patched_method);
+ }
+ }
+ pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
+ }
+ }
+ }
+
+ static void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
+ TimingLogger* logger)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
+ ImageSpace* first_space = spaces.front().get();
+ const ImageHeader& first_space_header = first_space->GetImageHeader();
+ uint32_t diff =
+ static_cast<uint32_t>(first_space->Begin() - first_space_header.GetImageBegin());
+ if (!Runtime::Current()->ShouldRelocate()) {
+ DCHECK_EQ(diff, 0u);
+ return;
+ }
+
+ PointerSize pointer_size = first_space_header.GetPointerSize();
+ if (pointer_size == PointerSize::k64) {
+ DoRelocateSpaces<PointerSize::k64>(spaces, diff);
+ } else {
+ DoRelocateSpaces<PointerSize::k32>(spaces, diff);
+ }
+ }
+
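The `diff` computed in MaybeRelocateSpaces is deliberately an unsigned 32-bit value: pointers inside the boot image are adjusted by a modular add, which works whether the new mapping landed above or below the address recorded in the header (only the 64-bit ImageHeader fields need the signed delta computed in DoRelocateSpaces). A self-contained sketch of the arithmetic, with made-up addresses:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t old_base = 0x70000000u;      // base address recorded in the image header
  const uint32_t new_base = 0x6f400000u;      // where the randomized reservation landed
  const uint32_t diff = new_base - old_base;  // wraps around to 0xff400000

  const uint32_t old_ptr = 0x70001234u;       // some pointer stored in the image
  const uint32_t relocated = old_ptr + diff;  // modular add, no sign needed
  assert(relocated == 0x6f401234u);           // same offset from the new base
  return 0;
}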
+ static void InitRuntimeMethods(const std::vector<std::unique_ptr<ImageSpace>>& spaces)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ DCHECK(!runtime->HasResolutionMethod());
+ DCHECK(!spaces.empty());
+ ImageSpace* space = spaces[0].get();
+ const ImageHeader& image_header = space->GetImageHeader();
+ // Use oat_file_non_owned_ from the `space` to set the runtime methods.
+ runtime->SetInstructionSet(space->oat_file_non_owned_->GetOatHeader().GetInstructionSet());
+ runtime->SetResolutionMethod(image_header.GetImageMethod(ImageHeader::kResolutionMethod));
+ runtime->SetImtConflictMethod(image_header.GetImageMethod(ImageHeader::kImtConflictMethod));
+ runtime->SetImtUnimplementedMethod(
+ image_header.GetImageMethod(ImageHeader::kImtUnimplementedMethod));
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod),
+ CalleeSaveType::kSaveAllCalleeSaves);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveRefsOnlyMethod),
+ CalleeSaveType::kSaveRefsOnly);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod),
+ CalleeSaveType::kSaveRefsAndArgs);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveEverythingMethod),
+ CalleeSaveType::kSaveEverything);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit),
+ CalleeSaveType::kSaveEverythingForClinit);
+ runtime->SetCalleeSaveMethod(
+ image_header.GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck),
+ CalleeSaveType::kSaveEverythingForSuspendCheck);
+ }
+
std::unique_ptr<ImageSpace> Load(const std::string& image_location,
const std::string& image_filename,
bool validate_oat_file,
+ TimingLogger* logger,
/*inout*/MemMap* image_reservation,
/*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
@@ -1627,8 +2019,8 @@
// descriptor (and the associated exclusive lock) to be released when
// we leave Create.
ScopedFlock image = LockedFile::Open(image_filename.c_str(),
- rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY /* flags */,
- true /* block */,
+ /*flags=*/ rw_lock ? (O_CREAT | O_RDWR) : O_RDONLY,
+ /*block=*/ true,
error_msg);
VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
@@ -1642,7 +2034,8 @@
return Loader::Init(image_filename.c_str(),
image_location.c_str(),
validate_oat_file,
- /* oat_file */ nullptr,
+ /*oat_file=*/ nullptr,
+ logger,
image_reservation,
oat_reservation,
error_msg);
@@ -1655,14 +2048,13 @@
/*out*/ std::vector<std::string>* all_locations,
/*out*/ std::string* error_msg) {
std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_filename);
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
oat_filename,
oat_filename,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
error_msg));
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' for image file %s: %s",
@@ -1708,14 +2100,17 @@
DCHECK(!image_reservation->IsValid());
size_t total_size =
dchecked_integral_cast<size_t>(oat_end - image_start) + extra_reservation_size;
+ bool relocate = Runtime::Current()->ShouldRelocate();
+    // If relocating, choose a random address for ASLR.
+ uint32_t addr = relocate ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
*image_reservation =
MemMap::MapAnonymous("Boot image reservation",
- reinterpret_cast32<uint8_t*>(image_start),
+ reinterpret_cast32<uint8_t*>(addr),
total_size,
PROT_NONE,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
+ /*low_4gb=*/ true,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
error_msg);
if (!image_reservation->IsValid()) {
return false;
@@ -1733,6 +2128,10 @@
return false;
}
}
+ uint32_t diff = reinterpret_cast32<uint32_t>(image_reservation->Begin()) - image_start;
+ image_start += diff;
+ image_end += diff;
+ oat_end += diff;
DCHECK(!oat_reservation->IsValid());
*oat_reservation = image_reservation->RemapAtEnd(reinterpret_cast32<uint8_t*>(image_end),
"Boot image oat reservation",
@@ -1851,11 +2250,8 @@
const std::string& dalvik_cache = loader.GetDalvikCache();
DCHECK(!dalvik_cache.empty());
std::string local_error_msg;
- // All secondary images are verified when the primary image is verified.
- bool verified =
- VerifyImage(image_location.c_str(), dalvik_cache.c_str(), image_isa, &local_error_msg);
bool check_space = CheckSpace(dalvik_cache, &local_error_msg);
- if (!verified || !check_space) {
+ if (!check_space) {
LOG(WARNING) << local_error_msg << " Preemptively pruning the dalvik cache.";
PruneDalvikCache(image_isa);
@@ -1871,27 +2267,9 @@
// Collect all the errors.
std::vector<std::string> error_msgs;
- // Step 1: Check if we have an existing image in the dalvik cache.
- if (loader.HasCache()) {
- std::string local_error_msg;
- // If we have system image, validate system image checksums, otherwise validate the oat file.
- if (loader.LoadFromDalvikCache(/* validate_system_checksums */ loader.HasSystem(),
- /* validate_oat_file */ !loader.HasSystem(),
- extra_reservation_size,
- boot_image_spaces,
- extra_reservation,
- &local_error_msg)) {
- return true;
- }
- error_msgs.push_back(local_error_msg);
- }
+ // Step 1: Check if we have an existing image in /system.
- // Step 2: We have an existing image in /system.
-
- // Step 2.a: We are not required to relocate it. Then we can use it directly.
- bool relocate = Runtime::Current()->ShouldRelocate();
-
- if (loader.HasSystem() && !relocate) {
+ if (loader.HasSystem()) {
std::string local_error_msg;
if (loader.LoadFromSystem(extra_reservation_size,
boot_image_spaces,
@@ -1902,29 +2280,17 @@
error_msgs.push_back(local_error_msg);
}
- // Step 2.b: We require a relocated image. Then we must patch it.
- if (loader.HasSystem() && relocate) {
+ // Step 2: Check if we have an existing image in the dalvik cache.
+ if (loader.HasCache()) {
std::string local_error_msg;
- if (!dex2oat_enabled) {
- local_error_msg = "Patching disabled.";
- } else if (ImageCreationAllowed(loader.IsGlobalCache(), image_isa, &local_error_msg)) {
- bool patch_success = RelocateImage(
- image_location.c_str(), loader.GetDalvikCache().c_str(), image_isa, &local_error_msg);
- if (patch_success) {
- if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
- /* validate_oat_file */ false,
- extra_reservation_size,
- boot_image_spaces,
- extra_reservation,
- &local_error_msg)) {
- return true;
- }
- }
+ if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ true,
+ extra_reservation_size,
+ boot_image_spaces,
+ extra_reservation,
+ &local_error_msg)) {
+ return true;
}
- error_msgs.push_back(StringPrintf("Cannot relocate image %s to %s: %s",
- image_location.c_str(),
- loader.GetCacheFilename().c_str(),
- local_error_msg.c_str()));
+ error_msgs.push_back(local_error_msg);
}
// Step 3: We do not have an existing image in /system,
@@ -1937,8 +2303,7 @@
bool compilation_success =
GenerateImage(loader.GetCacheFilename(), image_isa, &local_error_msg);
if (compilation_success) {
- if (loader.LoadFromDalvikCache(/* validate_system_checksums */ false,
- /* validate_oat_file */ false,
+ if (loader.LoadFromDalvikCache(/*validate_oat_file=*/ false,
extra_reservation_size,
boot_image_spaces,
extra_reservation,
@@ -1997,13 +2362,13 @@
std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg) {
- return Loader::Init(image,
- image,
- /* validate_oat_file */ false,
- oat_file,
- /* image_reservation */ nullptr,
- /* oat_reservation */ nullptr,
- error_msg);
+ return Loader::InitAppImage(image,
+ image,
+ /*validate_oat_file=*/ false,
+ oat_file,
+ /*image_reservation=*/ nullptr,
+ /*oat_reservation=*/ nullptr,
+ error_msg);
}
const OatFile* ImageSpace::GetOatFile() const {
@@ -2030,6 +2395,12 @@
DCHECK_GT(oat_filenames.size(), 1u);
// If the image filename was adapted (e.g., for our tests), we need to change this here,
// too, but need to strip all path components (they will be re-established when loading).
+ // For example, dex location
+ // /system/framework/core-libart.art
+ // with image name
+ // out/target/product/taimen/dex_bootjars/system/framework/arm64/boot-core-libart.art
+ // yields boot class path component
+ // /system/framework/boot-core-libart.art .
std::ostringstream bootcp_oss;
bool first_bootcp = true;
for (size_t i = 0; i < dex_locations.size(); ++i) {
@@ -2049,7 +2420,7 @@
size_t image_last_sep = (image_last_slash == std::string::npos)
? image_last_at
: (image_last_at == std::string::npos)
- ? std::string::npos
+ ? image_last_slash
: std::max(image_last_slash, image_last_at);
// Note: whenever image_last_sep == npos, +1 overflow means using the full string.
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index a2490ac..b940d88 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -57,9 +57,9 @@
// Reads the image header from the specified image location for the
// instruction set image_isa. Returns null on failure, with
// reason in error_msg.
- static ImageHeader* ReadImageHeader(const char* image_location,
- InstructionSet image_isa,
- std::string* error_msg);
+ static std::unique_ptr<ImageHeader> ReadImageHeader(const char* image_location,
+ InstructionSet image_isa,
+ std::string* error_msg);
// Give access to the OatFile.
const OatFile* GetOatFile() const;
@@ -148,16 +148,6 @@
return Begin() + GetImageHeader().GetImageSize();
}
- // Return the start of the associated oat file.
- uint8_t* GetOatFileBegin() const {
- return GetImageHeader().GetOatFileBegin();
- }
-
- // Return the end of the associated oat file.
- uint8_t* GetOatFileEnd() const {
- return GetImageHeader().GetOatFileEnd();
- }
-
void DumpSections(std::ostream& os) const;
// De-initialize the image-space by undoing the effects in Init().
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index 299a413..0a35bce 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -43,14 +43,13 @@
args.push_back("--oat-file=" + oat_location);
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- std::unique_ptr<OatFile> oat(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> oat(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
- /* abs_dex_location */ nullptr,
- /* reservation */ nullptr,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(oat != nullptr) << error_msg;
@@ -110,7 +109,7 @@
EXPECT_FALSE(ImageSpace::ValidateOatFile(*oat, &error_msg));
}
-template <bool kImage, bool kRelocate, bool kPatchoat, bool kImageDex2oat>
+template <bool kImage, bool kRelocate, bool kImageDex2oat>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
void SetUpRuntimeOptions(RuntimeOptions* options) override {
@@ -119,9 +118,6 @@
nullptr);
}
options->emplace_back(kRelocate ? "-Xrelocate" : "-Xnorelocate", nullptr);
- if (!kPatchoat) {
- options->emplace_back("-Xpatchoat:false", nullptr);
- }
options->emplace_back(kImageDex2oat ? "-Ximage-dex2oat" : "-Xnoimage-dex2oat", nullptr);
// We want to test the relocation behavior of ImageSpace. As such, don't pretend we're a
@@ -130,27 +126,22 @@
}
};
-using ImageSpacePatchoatTest = ImageSpaceLoadingTest<true, true, true, true>;
-TEST_F(ImageSpacePatchoatTest, Test) {
- EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
-}
-
-using ImageSpaceDex2oatTest = ImageSpaceLoadingTest<false, true, false, true>;
+using ImageSpaceDex2oatTest = ImageSpaceLoadingTest<false, true, true>;
TEST_F(ImageSpaceDex2oatTest, Test) {
EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
}
-using ImageSpaceNoDex2oatNoPatchoatTest = ImageSpaceLoadingTest<true, true, false, false>;
-TEST_F(ImageSpaceNoDex2oatNoPatchoatTest, Test) {
- EXPECT_TRUE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
-}
-
-using ImageSpaceNoRelocateNoDex2oatNoPatchoatTest = ImageSpaceLoadingTest<true, false, false, false>;
-TEST_F(ImageSpaceNoRelocateNoDex2oatNoPatchoatTest, Test) {
+using ImageSpaceNoDex2oatTest = ImageSpaceLoadingTest<true, true, false>;
+TEST_F(ImageSpaceNoDex2oatTest, Test) {
EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
}
-class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, false, true> {
+using ImageSpaceNoRelocateNoDex2oatTest = ImageSpaceLoadingTest<true, false, false>;
+TEST_F(ImageSpaceNoRelocateNoDex2oatTest, Test) {
+ EXPECT_FALSE(Runtime::Current()->GetHeap()->GetBootImageSpaces().empty());
+}
+
+class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, true> {
protected:
void SetUpRuntimeOptions(RuntimeOptions* options) override {
const char* android_data = getenv("ANDROID_DATA");
@@ -169,7 +160,7 @@
CHECK_NE(fd, -1) << strerror(errno);
result = close(fd);
CHECK_EQ(result, 0) << strerror(errno);
- ImageSpaceLoadingTest<false, true, false, true>::SetUpRuntimeOptions(options);
+ ImageSpaceLoadingTest<false, true, true>::SetUpRuntimeOptions(options);
}
void TearDown() override {
@@ -179,7 +170,7 @@
CHECK_EQ(result, 0) << strerror(errno);
result = setenv("ANDROID_DATA", old_android_data_.c_str(), /* replace */ 1);
CHECK_EQ(result, 0) << strerror(errno);
- ImageSpaceLoadingTest<false, true, false, true>::TearDown();
+ ImageSpaceLoadingTest<false, true, true>::TearDown();
}
private:
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 09d0251..b783cfe 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -108,8 +108,10 @@
mark_bitmap_->SetName(temp_name);
}
-LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
+LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
+ const char* lock_name)
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
+ lock_(lock_name, kAllocSpaceLock),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
total_objects_allocated_(0), begin_(begin), end_(end) {
}
@@ -120,8 +122,7 @@
}
LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
- : LargeObjectSpace(name, nullptr, nullptr),
- lock_("large object map space lock", kAllocSpaceLock) {}
+ : LargeObjectSpace(name, nullptr, nullptr, "large object map space lock") {}
LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
if (Runtime::Current()->IsRunningOnMemoryTool()) {
@@ -362,9 +363,8 @@
MemMap&& mem_map,
uint8_t* begin,
uint8_t* end)
- : LargeObjectSpace(name, begin, end),
- mem_map_(std::move(mem_map)),
- lock_("free list space lock", kAllocSpaceLock) {
+ : LargeObjectSpace(name, begin, end, "free list space lock"),
+ mem_map_(std::move(mem_map)) {
const size_t space_capacity = end - begin;
free_end_ = space_capacity;
CHECK_ALIGNED(space_capacity, kAlignment);
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 26c6463..47167fa 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -22,6 +22,7 @@
#include "base/tracking_safe_map.h"
#include "dlmalloc_space.h"
#include "space.h"
+#include "thread-current-inl.h"
#include <set>
#include <vector>
@@ -50,15 +51,19 @@
virtual ~LargeObjectSpace() {}
uint64_t GetBytesAllocated() override {
+ MutexLock mu(Thread::Current(), lock_);
return num_bytes_allocated_;
}
uint64_t GetObjectsAllocated() override {
+ MutexLock mu(Thread::Current(), lock_);
return num_objects_allocated_;
}
uint64_t GetTotalBytesAllocated() const {
+ MutexLock mu(Thread::Current(), lock_);
return total_bytes_allocated_;
}
uint64_t GetTotalObjectsAllocated() const {
+ MutexLock mu(Thread::Current(), lock_);
return total_objects_allocated_;
}
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) override;
@@ -110,14 +115,26 @@
virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
protected:
- explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end);
+ explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
+ const char* lock_name);
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg);
- // Approximate number of bytes which have been allocated into the space.
- uint64_t num_bytes_allocated_;
- uint64_t num_objects_allocated_;
- uint64_t total_bytes_allocated_;
- uint64_t total_objects_allocated_;
+  // Used to ensure mutual exclusion when the allocation space's data structures,
+  // including the allocation counters below, are being modified.
+ mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+  // Number of bytes allocated into the space and not yet freed. The same count is also
+  // maintained in the identically named field in Heap. Counts actual allocated sizes (after
+  // rounding up), not requested sizes. TODO: It would be cheaper to just maintain total
+  // allocated and total free counts.
+ uint64_t num_bytes_allocated_ GUARDED_BY(lock_);
+ uint64_t num_objects_allocated_ GUARDED_BY(lock_);
+
+ // Totals for large objects ever allocated, including those that have since been deallocated.
+ // Never decremented.
+ uint64_t total_bytes_allocated_ GUARDED_BY(lock_);
+ uint64_t total_objects_allocated_ GUARDED_BY(lock_);
+
// Begin and end, may change as more large objects are allocated.
uint8_t* begin_;
uint8_t* end_;
@@ -157,8 +174,6 @@
bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
- // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
- mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
GUARDED_BY(lock_);
};
@@ -215,7 +230,6 @@
MemMap allocation_info_map_;
AllocationInfo* allocation_info_;
- mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Free bytes at the end of the space.
size_t free_end_ GUARDED_BY(lock_);
FreeBlocks free_blocks_ GUARDED_BY(lock_);
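The header change above moves lock_ into LargeObjectSpace and puts the allocation counters behind it, so even plain reads in the getters take the mutex and Clang's thread-safety analysis (GUARDED_BY) can check the discipline. A standalone sketch of the same guarded-counter pattern, using std::mutex and illustrative names:

#include <cstdint>
#include <mutex>

class AllocCounters {
 public:
  void RecordAlloc(uint64_t bytes) {
    std::lock_guard<std::mutex> guard(lock_);
    num_bytes_allocated_ += bytes;
    total_bytes_allocated_ += bytes;  // running total, never decremented
  }

  void RecordFree(uint64_t bytes) {
    std::lock_guard<std::mutex> guard(lock_);
    num_bytes_allocated_ -= bytes;
  }

  uint64_t BytesAllocated() const {
    std::lock_guard<std::mutex> guard(lock_);  // readers take the same lock
    return num_bytes_allocated_;
  }

 private:
  mutable std::mutex lock_;  // mutable so const getters can lock it
  uint64_t num_bytes_allocated_ = 0;
  uint64_t total_bytes_allocated_ = 0;
};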
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 9baa016..d55ccd6 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -128,7 +128,7 @@
AllocRaceTask(size_t id, size_t iterations, size_t size, LargeObjectSpace* los) :
id_(id), iterations_(iterations), size_(size), los_(los) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
for (size_t i = 0; i < iterations_ ; ++i) {
size_t alloc_size, bytes_tl_bulk_allocated;
mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr,
@@ -140,7 +140,7 @@
}
}
- virtual void Finalize() {
+ void Finalize() override {
delete this;
}
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e048515..bda1f1c 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -60,7 +60,8 @@
return obj;
}
MutexLock mu(Thread::Current(), region_lock_);
- // Retry with current region since another thread may have updated it.
+ // Retry with current region since another thread may have updated
+ // current_region_ or evac_region_. TODO: fix race.
obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
bytes_allocated,
usable_size,
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index f74fa86..eba6fac 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -69,6 +69,7 @@
if (!mem_map.IsValid()) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
<< PrettySize(capacity) << " with message " << error_msg;
+ PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
MemMap::DumpMaps(LOG_STREAM(ERROR));
return MemMap::Invalid();
}
@@ -726,7 +727,7 @@
void RegionSpace::RecordAlloc(mirror::Object* ref) {
CHECK(ref != nullptr);
Region* r = RefToRegion(ref);
- r->objects_allocated_.fetch_add(1, std::memory_order_seq_cst);
+ r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
}
bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 0bf4f38..5af1dd3 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -35,8 +35,10 @@
// will not try to allocate a new region from the beginning of the
// region space, but from the last allocated region. This allocation
// strategy reduces region reuse and should help catch some GC bugs
-// earlier.
-static constexpr bool kCyclicRegionAllocation = true;
+// earlier. However, cyclic region allocation can also create memory
+// fragmentation at the region level (see b/33795328); therefore, we
+// only enable it in debug mode.
+static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;
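For intuition, the toggle above only changes where the search for a free region starts: from the beginning of the region table when disabled, from just past the most recently allocated region (wrapping around) when enabled. A simplified sketch of that search order, with illustrative types:

#include <cstddef>
#include <vector>

// Returns the index of a free region, or in_use.size() if none is free.
size_t FindFreeRegion(const std::vector<bool>& in_use,
                      size_t last_allocated,
                      bool cyclic) {
  const size_t n = in_use.size();
  if (n == 0) {
    return 0;
  }
  // Cyclic allocation resumes after the last allocation instead of at index 0,
  // which reduces region reuse (good for catching GC bugs) but can fragment.
  const size_t start = cyclic ? (last_allocated + 1) % n : 0u;
  for (size_t i = 0; i < n; ++i) {
    const size_t idx = (start + i) % n;  // wrap around the region table
    if (!in_use[idx]) {
      return idx;
    }
  }
  return n;
}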
// A space that consists of equal-sized regions.
class RegionSpace final : public ContinuousMemMapAllocSpace {
@@ -580,6 +582,8 @@
// (large region + one or more large tail regions).
Atomic<uint8_t*> top_; // The current position of the allocation.
uint8_t* end_; // The end address of the region.
+ // objects_allocated_ is accessed using memory_order_relaxed. Treat as approximate when there
+ // are concurrent updates.
Atomic<size_t> objects_allocated_; // The number of objects allocated.
uint32_t alloc_time_; // The allocation time of the region.
// Note that newly allocated and evacuated regions use -1 as
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index ed85b06..f482466 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -127,7 +127,7 @@
// Need to mark the card since this will update the mod-union table next GC cycle.
card_table->MarkCard(ptrs[i]);
}
- zygote_space->objects_allocated_.fetch_sub(num_ptrs, std::memory_order_seq_cst);
+ zygote_space->objects_allocated_.fetch_sub(num_ptrs);
}
} // namespace space
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 1f73577..03e2ec8 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -68,7 +68,7 @@
}
uint64_t GetObjectsAllocated() {
- return objects_allocated_.load(std::memory_order_seq_cst);
+ return objects_allocated_.load();
}
void Clear() override;
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 5d234ea..0281eee 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -58,8 +58,8 @@
oss << " klass=" << klass;
if (IsValidClass(klass)) {
oss << "(" << klass->PrettyClass() << ")";
- if (klass->IsArrayClass<kVerifyNone, kWithoutReadBarrier>()) {
- oss << " length=" << obj->AsArray<kVerifyNone, kWithoutReadBarrier>()->GetLength();
+ if (klass->IsArrayClass<kVerifyNone>()) {
+ oss << " length=" << obj->AsArray<kVerifyNone>()->GetLength();
}
} else {
oss << " <invalid address>";
@@ -88,6 +88,7 @@
// Lowest priority logging first:
PrintFileToLog("/proc/self/maps", android::base::LogSeverity::FATAL_WITHOUT_ABORT);
MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse */ true);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
// Buffer the output in the string stream since it is more important than the stack traces
// and we want it to have log priority. The stack traces are printed from Runtime::Abort
// which is called from LOG(FATAL) but before the abort message.
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
deleted file mode 100644
index 464c2b7..0000000
--- a/runtime/generated/asm_support_gen.h
+++ /dev/null
@@ -1,173 +0,0 @@
-
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
-// This file has been auto-generated by cpp-define-generator; do not edit directly.
-
-#define STACK_REFERENCE_SIZE 0x4
-DEFINE_CHECK_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::StackReference<art::mirror::Object>))))
-#define COMPRESSED_REFERENCE_SIZE 0x4
-DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE), (static_cast<size_t>(sizeof(art::mirror::CompressedReference<art::mirror::Object>))))
-#define COMPRESSED_REFERENCE_SIZE_SHIFT 0x2
-DEFINE_CHECK_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE_SHIFT), (static_cast<size_t>(art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))))
-#define RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveAllCalleeSaves))))
-#define RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsOnly))))
-#define RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET 0x10
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsAndArgs))))
-#define RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET 0x18
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverything))))
-#define RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET 0x20
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForClinit))))
-#define RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET 0x28
-DEFINE_CHECK_EQ(static_cast<size_t>(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET), (static_cast<size_t>(art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))))
-#define THREAD_FLAGS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_FLAGS_OFFSET), (static_cast<int32_t>(art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_ID_OFFSET 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_ID_OFFSET), (static_cast<int32_t>(art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_IS_GC_MARKING_OFFSET 52
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_IS_GC_MARKING_OFFSET), (static_cast<int32_t>(art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())))
-#define THREAD_CARD_TABLE_OFFSET 136
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CARD_TABLE_OFFSET), (static_cast<int32_t>(art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())))
-#define MIRROR_CLASS_DEX_CACHE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_CLASS_DEX_CACHE_OFFSET), (static_cast<int32_t>(art::mirror::Class::DexCacheOffset().Int32Value())))
-#define MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET 48
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET), (static_cast<int32_t>(art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())))
-#define MIRROR_OBJECT_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_CLASS_OFFSET), (static_cast<int32_t>(art::mirror::Object::ClassOffset().Int32Value())))
-#define MIRROR_OBJECT_LOCK_WORD_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(MIRROR_OBJECT_LOCK_WORD_OFFSET), (static_cast<int32_t>(art::mirror::Object::MonitorOffset().Int32Value())))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE 0x80000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), (static_cast<uint32_t>((art::kAccClassIsFinalizable))))
-#define ACCESS_FLAGS_CLASS_IS_INTERFACE 0x200
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_INTERFACE), (static_cast<uint32_t>((art::kAccInterface))))
-#define ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT 0x1f
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT), (static_cast<uint32_t>((art::MostSignificantBit(art::kAccClassIsFinalizable)))))
-#define ART_METHOD_JNI_OFFSET_32 20
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())))
-#define ART_METHOD_JNI_OFFSET_64 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_JNI_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())))
-#define ART_METHOD_QUICK_CODE_OFFSET_32 24
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_32), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())))
-#define ART_METHOD_QUICK_CODE_OFFSET_64 32
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_QUICK_CODE_OFFSET_64), (static_cast<int32_t>(art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())))
-#define ART_METHOD_DECLARING_CLASS_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_DECLARING_CLASS_OFFSET), (static_cast<int32_t>(art::ArtMethod::DeclaringClassOffset().Int32Value())))
-#define ART_METHOD_ACCESS_FLAGS_OFFSET 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(ART_METHOD_ACCESS_FLAGS_OFFSET), (static_cast<int32_t>(art::ArtMethod::AccessFlagsOffset().Int32Value())))
-#define STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT 3
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))))
-#define STRING_DEX_CACHE_SIZE_MINUS_ONE 1023
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
-#define STRING_DEX_CACHE_HASH_BITS 10
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
-#define STRING_DEX_CACHE_ELEMENT_SIZE 8
-DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
-#define METHOD_DEX_CACHE_SIZE_MINUS_ONE 1023
-DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheMethodCacheSize - 1)))
-#define METHOD_DEX_CACHE_HASH_BITS 10
-DEFINE_CHECK_EQ(static_cast<int32_t>(METHOD_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))))
-#define CARD_TABLE_CARD_SHIFT 0xa
-DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>(art::gc::accounting::CardTable::kCardShift)))
-#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
-DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
-#define LOCK_WORD_STATE_SHIFT 30
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
-#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
-#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
-#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
-#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
-#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddressOverflow)))
-#define LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT 0x3
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT), (static_cast<uint32_t>(art::LockWord::kForwardingAddressShift)))
-#define LOCK_WORD_GC_STATE_MASK_SHIFTED 0x30000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
-#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
-#define LOCK_WORD_GC_STATE_SIZE 2
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
-#define LOCK_WORD_GC_STATE_SHIFT 28
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
-#define LOCK_WORD_MARK_BIT_SHIFT 29
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_MARK_BIT_SHIFT), (static_cast<int32_t>(art::LockWord::kMarkBitStateShift)))
-#define LOCK_WORD_MARK_BIT_MASK_SHIFTED 0x20000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_MARK_BIT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kMarkBitStateMaskShifted)))
-#define STD_MEMORY_ORDER_RELAXED 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(STD_MEMORY_ORDER_RELAXED), (static_cast<int32_t>(std::memory_order_relaxed)))
-#define OBJECT_ALIGNMENT_MASK 0x7
-DEFINE_CHECK_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), (static_cast<size_t>(art::kObjectAlignment - 1)))
-#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
-DEFINE_CHECK_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED), (static_cast<uint32_t>(~static_cast<uint32_t>(art::kObjectAlignment - 1))))
-#define OBJECT_ALIGNMENT_MASK_TOGGLED64 0xfffffffffffffff8
-DEFINE_CHECK_EQ(static_cast<uint64_t>(OBJECT_ALIGNMENT_MASK_TOGGLED64), (static_cast<uint64_t>(~static_cast<uint64_t>(art::kObjectAlignment - 1))))
-#define ACC_OBSOLETE_METHOD 262144
-DEFINE_CHECK_EQ(static_cast<int32_t>(ACC_OBSOLETE_METHOD), (static_cast<int32_t>(art::kAccObsoleteMethod)))
-#define ACC_OBSOLETE_METHOD_SHIFT 18
-DEFINE_CHECK_EQ(static_cast<int32_t>(ACC_OBSOLETE_METHOD_SHIFT), (static_cast<int32_t>(art::WhichPowerOf2(art::kAccObsoleteMethod))))
-#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 3
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), (static_cast<int32_t>((art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 7
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK), (static_cast<int32_t>((static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff8
-DEFINE_CHECK_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32), (static_cast<uint32_t>((~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff8
-DEFINE_CHECK_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64), (static_cast<uint64_t>((~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)))))
-#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListOffset()))))
-#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))))
-#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))))
-#define ROSALLOC_SLOT_NEXT_OFFSET 0
-DEFINE_CHECK_EQ(static_cast<int32_t>(ROSALLOC_SLOT_NEXT_OFFSET), (static_cast<int32_t>((art::gc::allocator::RosAlloc::RunSlotNextOffset()))))
-#define THREAD_SUSPEND_REQUEST 1
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_REQUEST), (static_cast<int32_t>((art::kSuspendRequest))))
-#define THREAD_CHECKPOINT_REQUEST 2
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kCheckpointRequest))))
-#define THREAD_EMPTY_CHECKPOINT_REQUEST 4
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_EMPTY_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kEmptyCheckpointRequest))))
-#define THREAD_SUSPEND_OR_CHECKPOINT_REQUEST 7
-DEFINE_CHECK_EQ(static_cast<int32_t>(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), (static_cast<int32_t>((art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest))))
-#define JIT_CHECK_OSR (-1)
-DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_CHECK_OSR), (static_cast<int16_t>((art::jit::kJitCheckForOSR))))
-#define JIT_HOTNESS_DISABLE (-2)
-DEFINE_CHECK_EQ(static_cast<int16_t>(JIT_HOTNESS_DISABLE), (static_cast<int16_t>((art::jit::kJitHotnessDisabled))))
-
-#endif // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index e8a47d1..f696e25 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -148,11 +148,11 @@
hprof_basic_long = 11,
};
-typedef uint32_t HprofStringId;
-typedef uint32_t HprofClassObjectId;
-typedef uint32_t HprofClassSerialNumber;
-typedef uint32_t HprofStackTraceSerialNumber;
-typedef uint32_t HprofStackFrameId;
+using HprofStringId = uint32_t;
+using HprofClassObjectId = uint32_t;
+using HprofClassSerialNumber = uint32_t;
+using HprofStackTraceSerialNumber = uint32_t;
+using HprofStackFrameId = uint32_t;
static constexpr HprofStackTraceSerialNumber kHprofNullStackTrace = 0;
class EndianOutput {
@@ -1073,7 +1073,8 @@
if (obj->IsClass() && obj->AsClass()->IsRetired()) {
return;
}
- DCHECK(visited_objects_.insert(obj).second) << "Already visited " << obj;
+ DCHECK(visited_objects_.insert(obj).second)
+ << "Already visited " << obj << "(" << obj->PrettyTypeOf() << ")";
++total_objects_;
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index c527f6f..2082064 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -49,6 +49,38 @@
return image_roots;
}
+inline void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
+ const ImageSection& fields = GetFieldsSection();
+ for (size_t pos = 0u; pos < fields.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
+ for (size_t i = 0u; i < array->size(); ++i) {
+ visitor->Visit(&array->At(i, sizeof(ArtField)));
+ }
+ pos += array->ComputeSize(array->size());
+ }
+}
+
+inline void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
+ uint8_t* base,
+ PointerSize pointer_size) const {
+ const size_t method_alignment = ArtMethod::Alignment(pointer_size);
+ const size_t method_size = ArtMethod::Size(pointer_size);
+ const ImageSection& methods = GetMethodsSection();
+ for (size_t pos = 0u; pos < methods.Size(); ) {
+ auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
+ for (size_t i = 0u; i < array->size(); ++i) {
+ visitor->Visit(&array->At(i, method_size, method_alignment));
+ }
+ pos += array->ComputeSize(array->size(), method_size, method_alignment);
+ }
+ const ImageSection& runtime_methods = GetRuntimeMethodsSection();
+ for (size_t pos = 0u; pos < runtime_methods.Size(); ) {
+ auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
+ visitor->Visit(method);
+ pos += method_size;
+ }
+}
+
template <typename Visitor>
inline void ImageHeader::VisitPackedImTables(const Visitor& visitor,
uint8_t* base,
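
The two visitors above are moved verbatim from image.cc into this inline header. The pattern they share is a linear walk over back-to-back length-prefixed arrays packed into one image section; below is a minimal sketch of that pattern using hypothetical stand-in types (PackedArray, Element, Visitor), not the real LengthPrefixedArray/ArtFieldVisitor classes.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-ins, used only to illustrate the walking pattern.
    struct Element { uint32_t payload; };

    struct PackedArray {
      uint32_t size;  // Number of elements stored directly after this header.

      Element* At(uint32_t i) {
        uint8_t* first = reinterpret_cast<uint8_t*>(this) + sizeof(uint32_t);
        return reinterpret_cast<Element*>(first) + i;
      }
      size_t ComputeSize() const { return sizeof(uint32_t) + size * sizeof(Element); }
    };

    struct Visitor {
      virtual ~Visitor() = default;
      virtual void Visit(Element* e) = 0;
    };

    // Visits every element of every length-prefixed array packed into
    // [base, base + section_size), advancing by each array's computed size,
    // exactly as VisitPackedArtFields does for the image's fields section.
    void VisitPackedElements(Visitor* visitor, uint8_t* base, size_t section_size) {
      for (size_t pos = 0u; pos < section_size; ) {
        auto* array = reinterpret_cast<PackedArray*>(base + pos);
        for (uint32_t i = 0u; i < array->size; ++i) {
          visitor->Visit(array->At(i));
        }
        pos += array->ComputeSize();
      }
    }
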
diff --git a/runtime/image.cc b/runtime/image.cc
index 028c515..a4351d0 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' }; // Image relocations.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '4', '\0' }; // Remove PIC flags.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -42,8 +42,6 @@
uint32_t boot_oat_begin,
uint32_t boot_oat_size,
uint32_t pointer_size,
- bool compile_pic,
- bool is_pic,
StorageMode storage_mode,
size_t data_size)
: image_begin_(image_begin),
@@ -60,8 +58,6 @@
patch_delta_(0),
image_roots_(image_roots),
pointer_size_(pointer_size),
- compile_pic_(compile_pic),
- is_pic_(is_pic),
storage_mode_(storage_mode),
data_size_(data_size) {
CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
@@ -77,7 +73,7 @@
std::copy_n(sections, kSectionCount, sections_);
}
-void ImageHeader::RelocateImage(off_t delta) {
+void ImageHeader::RelocateImage(int64_t delta) {
CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";
oat_file_begin_ += delta;
oat_data_begin_ += delta;
@@ -88,12 +84,12 @@
RelocateImageMethods(delta);
}
-void ImageHeader::RelocateImageObjects(off_t delta) {
+void ImageHeader::RelocateImageObjects(int64_t delta) {
image_begin_ += delta;
image_roots_ += delta;
}
-void ImageHeader::RelocateImageMethods(off_t delta) {
+void ImageHeader::RelocateImageMethods(int64_t delta) {
for (size_t i = 0; i < kImageMethodsCount; ++i) {
image_methods_[i] += delta;
}
@@ -152,38 +148,6 @@
}
}
-void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
- const ImageSection& fields = GetFieldsSection();
- for (size_t pos = 0; pos < fields.Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos);
- for (size_t i = 0; i < array->size(); ++i) {
- visitor->Visit(&array->At(i, sizeof(ArtField)));
- }
- pos += array->ComputeSize(array->size());
- }
-}
-
-void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor,
- uint8_t* base,
- PointerSize pointer_size) const {
- const size_t method_alignment = ArtMethod::Alignment(pointer_size);
- const size_t method_size = ArtMethod::Size(pointer_size);
- const ImageSection& methods = GetMethodsSection();
- for (size_t pos = 0; pos < methods.Size(); ) {
- auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos);
- for (size_t i = 0; i < array->size(); ++i) {
- visitor->Visit(&array->At(i, method_size, method_alignment));
- }
- pos += array->ComputeSize(array->size(), method_size, method_alignment);
- }
- const ImageSection& runtime_methods = GetRuntimeMethodsSection();
- for (size_t pos = 0; pos < runtime_methods.Size(); ) {
- auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos);
- visitor->Visit(method);
- pos += method_size;
- }
-}
-
PointerSize ImageHeader::GetPointerSize() const {
return ConvertToPointerSize(pointer_size_);
}
diff --git a/runtime/image.h b/runtime/image.h
index af092ad..bd8bc28 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -108,8 +108,6 @@
patch_delta_(0),
image_roots_(0U),
pointer_size_(0U),
- compile_pic_(0),
- is_pic_(0),
storage_mode_(kDefaultStorageMode),
data_size_(0) {}
@@ -127,8 +125,6 @@
uint32_t boot_oat_begin,
uint32_t boot_oat_size,
uint32_t pointer_size,
- bool compile_pic,
- bool is_pic,
StorageMode storage_mode,
size_t data_size);
@@ -175,11 +171,11 @@
return pointer_size_;
}
- off_t GetPatchDelta() const {
+ int32_t GetPatchDelta() const {
return patch_delta_;
}
- void SetPatchDelta(off_t patch_delta) {
+ void SetPatchDelta(int32_t patch_delta) {
patch_delta_ = patch_delta;
}
@@ -219,6 +215,16 @@
kBootImageLiveObjects = kSpecialRoots, // Array of boot image objects that must be kept live.
};
+ /*
+ * This describes the number and ordering of sections inside of Boot
+ * and App Images. It is very important that changes to this struct
+ * are reflected in the compiler and loader.
+ *
+ * See:
+ * - ImageWriter::ImageInfo::CreateImageSections()
+ * - ImageWriter::Write()
+ * - ImageWriter::AllocMemory()
+ */
enum ImageSections {
kSectionObjects,
kSectionArtFields,
@@ -229,6 +235,7 @@
kSectionDexCacheArrays,
kSectionInternedStrings,
kSectionClassTable,
+ kSectionStringReferenceOffsets,
kSectionImageBitmap,
kSectionImageRelocations,
kSectionCount, // Number of elements in enum.
@@ -291,6 +298,10 @@
return GetImageSection(kSectionImageRelocations);
}
+ const ImageSection& GetImageStringReferenceOffsetsSection() const {
+ return GetImageSection(kSectionStringReferenceOffsets);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Object> GetImageRoot(ImageRoot image_root) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -299,17 +310,9 @@
ObjPtr<mirror::ObjectArray<mirror::Object>> GetImageRoots() const
REQUIRES_SHARED(Locks::mutator_lock_);
- void RelocateImage(off_t delta);
- void RelocateImageMethods(off_t delta);
- void RelocateImageObjects(off_t delta);
-
- bool CompilePic() const {
- return compile_pic_ != 0;
- }
-
- bool IsPic() const {
- return is_pic_ != 0;
- }
+ void RelocateImage(int64_t delta);
+ void RelocateImageMethods(int64_t delta);
+ void RelocateImageObjects(int64_t delta);
uint32_t GetBootImageBegin() const {
return boot_image_begin_;
@@ -427,14 +430,6 @@
// Pointer size, this affects the size of the ArtMethods.
uint32_t pointer_size_;
- // Boolean (0 or 1) to denote if the image was compiled with --compile-pic option
- const uint32_t compile_pic_;
-
- // Boolean (0 or 1) to denote if the image can be mapped at a random address, this only refers to
- // the .art file. Currently, app oat files do not depend on their app image. There are no pointers
- // from the app oat code to the app image.
- const uint32_t is_pic_;
-
// Image section sizes/offsets correspond to the uncompressed form.
ImageSection sections_[kSectionCount];
@@ -451,6 +446,39 @@
friend class linker::ImageWriter;
};
+/*
+ * Tags the lowest bit. Used by AppImage logic to differentiate between managed
+ * and native references.
+ */
+template<typename T>
+T SetNativeRefTag(T val) {
+ static_assert(std::is_integral<T>::value, "Expected integral type.");
+
+ return val | 1u;
+}
+
+/*
+ * Retrieves the value of the lowest bit. Used by AppImage logic to
+ * differentiate between managed and native references.
+ */
+template<typename T>
+bool HasNativeRefTag(T val) {
+ static_assert(std::is_integral<T>::value, "Expected integral type.");
+
+ return (val & 1u) == 1u;
+}
+
+/*
+ * Clears the lowest bit of the value. Used by AppImage logic to
+ * differentiate between managed and native references.
+ */
+template<typename T>
+T ClearNativeRefTag(T val) {
+ static_assert(std::is_integral<T>::value, "Expected integral type.");
+
+ return val & ~1u;
+}
+
std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy);
std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& policy);
std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageSections& section);
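
A small usage sketch of the tag helpers added above, assuming the code runs inside namespace art with image.h included. The concrete offsets are made up; the scheme only works because the tagged values are at least 2-aligned, leaving the lowest bit free.

    #include <cassert>
    #include <cstdint>

    void NativeRefTagExample() {
      // Hypothetical offset to a native (ArtField/ArtMethod) entry; must be even.
      uint32_t native_ref = 0x1000u;
      uint32_t tagged = SetNativeRefTag(native_ref);    // 0x1001: lowest bit marks "native".
      assert(HasNativeRefTag(tagged));
      assert(ClearNativeRefTag(tagged) == native_ref);  // Round-trips back to the original value.

      uint32_t managed_ref = 0x2000u;                   // Even, so it reads as a managed reference.
      assert(!HasNativeRefTag(managed_ref));
    }
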
diff --git a/runtime/imt_conflict_table.h b/runtime/imt_conflict_table.h
index 3586864..02b3be4 100644
--- a/runtime/imt_conflict_table.h
+++ b/runtime/imt_conflict_table.h
@@ -187,17 +187,17 @@
ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
if (pointer_size == PointerSize::k64) {
- return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+ return reinterpret_cast64<ArtMethod*>(data64_[index]);
} else {
- return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+ return reinterpret_cast32<ArtMethod*>(data32_[index]);
}
}
void SetMethod(size_t index, PointerSize pointer_size, ArtMethod* method) {
if (pointer_size == PointerSize::k64) {
- data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+ data64_[index] = reinterpret_cast64<uint64_t>(method);
} else {
- data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+ data32_[index] = reinterpret_cast32<uint32_t>(method);
}
}
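
The open-coded casts removed in this hunk are what the new reinterpret_cast32/reinterpret_cast64 helpers from base/casts.h encapsulate. A rough sketch of the invariant the 32-bit path relies on, with a hypothetical Method stand-in rather than the real ArtMethod or the real helpers:

    #include <cassert>
    #include <cstdint>

    struct Method {};  // Stand-in type for this sketch only.

    // Narrowing store: only valid when the pointer value fits in 32 bits, which
    // is what the removed DCHECK/dchecked_integral_cast verified explicitly.
    uint32_t StoreMethod32(Method* m) {
      uintptr_t value = reinterpret_cast<uintptr_t>(m);
      assert(value == static_cast<uint32_t>(value));  // No high bits may be lost.
      return static_cast<uint32_t>(value);
    }

    // Widening load: zero-extend the stored value back into a pointer.
    Method* LoadMethod32(uint32_t value) {
      return reinterpret_cast<Method*>(static_cast<uintptr_t>(value));
    }
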
diff --git a/runtime/imtable.h b/runtime/imtable.h
index aa0a504..3c52fb8 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -21,6 +21,7 @@
#error IMT_SIZE not defined
#endif
+#include "base/casts.h"
#include "base/enums.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -46,10 +47,10 @@
uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
- return reinterpret_cast<ArtMethod*>(value);
+ return reinterpret_cast32<ArtMethod*>(value);
} else {
uint64_t value = *reinterpret_cast<uint64_t*>(ptr);
- return reinterpret_cast<ArtMethod*>(value);
+ return reinterpret_cast64<ArtMethod*>(value);
}
}
@@ -57,11 +58,9 @@
DCHECK_LT(index, kSize);
uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
- uintptr_t value = reinterpret_cast<uintptr_t>(method);
- DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we dont lose any non 0 bits.
- *reinterpret_cast<uint32_t*>(ptr) = static_cast<uint32_t>(value);
+ *reinterpret_cast<uint32_t*>(ptr) = reinterpret_cast32<uint32_t>(method);
} else {
- *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast<uint64_t>(method);
+ *reinterpret_cast<uint64_t*>(ptr) = reinterpret_cast64<uint64_t>(method);
}
}
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8ab4a9b..d205225 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -21,6 +21,7 @@
#include "base/utils.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
+#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index b42433c..4937132 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -160,7 +160,6 @@
have_exception_thrown_listeners_(false),
have_watched_frame_pop_listeners_(false),
have_branch_listeners_(false),
- have_invoke_virtual_or_interface_listeners_(false),
have_exception_handled_listeners_(false),
deoptimized_methods_lock_("deoptimized methods lock", kGenericBottomLock),
deoptimization_enabled_(false),
@@ -562,11 +561,6 @@
branch_listeners_,
listener,
&have_branch_listeners_);
- PotentiallyAddListenerTo(kInvokeVirtualOrInterface,
- events,
- invoke_virtual_or_interface_listeners_,
- listener,
- &have_invoke_virtual_or_interface_listeners_);
PotentiallyAddListenerTo(kDexPcMoved,
events,
dex_pc_listeners_,
@@ -649,11 +643,6 @@
branch_listeners_,
listener,
&have_branch_listeners_);
- PotentiallyRemoveListenerFrom(kInvokeVirtualOrInterface,
- events,
- invoke_virtual_or_interface_listeners_,
- listener,
- &have_invoke_virtual_or_interface_listeners_);
PotentiallyRemoveListenerFrom(kDexPcMoved,
events,
dex_pc_listeners_,
@@ -1213,21 +1202,6 @@
}
}
-void Instrumentation::InvokeVirtualOrInterfaceImpl(Thread* thread,
- ObjPtr<mirror::Object> this_object,
- ArtMethod* caller,
- uint32_t dex_pc,
- ArtMethod* callee) const {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::Object> thiz(hs.NewHandle(this_object));
- for (InstrumentationListener* listener : invoke_virtual_or_interface_listeners_) {
- if (listener != nullptr) {
- listener->InvokeVirtualOrInterface(thread, thiz, caller, dex_pc, callee);
- }
- }
-}
-
void Instrumentation::WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const {
for (InstrumentationListener* listener : watched_frame_pop_listeners_) {
if (listener != nullptr) {
@@ -1385,7 +1359,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
shorty('V') {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m != nullptr && !m->IsRuntimeMethod()) {
// The first Java method.
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index e5d8800..b3fae25 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -143,14 +143,6 @@
int32_t dex_pc_offset)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
- // Call-back for when we get an invokevirtual or an invokeinterface.
- virtual void InvokeVirtualOrInterface(Thread* thread,
- Handle<mirror::Object> this_object,
- ArtMethod* caller,
- uint32_t dex_pc,
- ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_) = 0;
-
// Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack by
// either return or exceptions. Normally instrumentation listeners should ensure that there are
// shadow-frames by deoptimizing stacks.
@@ -193,7 +185,6 @@
kFieldWritten = 0x20,
kExceptionThrown = 0x40,
kBranch = 0x80,
- kInvokeVirtualOrInterface = 0x100,
kWatchedFramePop = 0x200,
kExceptionHandled = 0x400,
};
@@ -377,10 +368,6 @@
return have_branch_listeners_;
}
- bool HasInvokeVirtualOrInterfaceListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return have_invoke_virtual_or_interface_listeners_;
- }
-
bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_watched_frame_pop_listeners_;
}
@@ -393,8 +380,8 @@
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_thrown_listeners_ || have_method_unwind_listeners_ ||
- have_branch_listeners_ || have_invoke_virtual_or_interface_listeners_ ||
- have_watched_frame_pop_listeners_ || have_exception_handled_listeners_;
+ have_branch_listeners_ || have_watched_frame_pop_listeners_ ||
+ have_exception_handled_listeners_;
}
// Any instrumentation *other* than what is needed for Jit profiling active?
@@ -470,17 +457,6 @@
}
}
- void InvokeVirtualOrInterface(Thread* thread,
- mirror::Object* this_object,
- ArtMethod* caller,
- uint32_t dex_pc,
- ArtMethod* callee) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
- InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
- }
- }
-
// Inform listeners that a branch has been taken (only supported by the interpreter).
void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -598,12 +574,6 @@
REQUIRES_SHARED(Locks::mutator_lock_);
void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void InvokeVirtualOrInterfaceImpl(Thread* thread,
- ObjPtr<mirror::Object> this_object,
- ArtMethod* caller,
- uint32_t dex_pc,
- ArtMethod* callee) const
- REQUIRES_SHARED(Locks::mutator_lock_);
void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
REQUIRES_SHARED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread,
@@ -683,9 +653,6 @@
// Do we have any branch listeners? Short-cut to avoid taking the instrumentation_lock_.
bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
- // Do we have any invoke listeners? Short-cut to avoid taking the instrumentation_lock_.
- bool have_invoke_virtual_or_interface_listeners_ GUARDED_BY(Locks::mutator_lock_);
-
// Do we have any exception handled listeners? Short-cut to avoid taking the
// instrumentation_lock_.
bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);
@@ -709,8 +676,6 @@
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> invoke_virtual_or_interface_listeners_
- GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 9146245..31cfeb6 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -50,7 +50,6 @@
received_exception_thrown_event(false),
received_exception_handled_event(false),
received_branch_event(false),
- received_invoke_virtual_or_interface_event(false),
received_watched_frame_pop(false) {}
virtual ~TestInstrumentationListener() {}
@@ -146,15 +145,6 @@
received_branch_event = true;
}
- void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
- ArtMethod* caller ATTRIBUTE_UNUSED,
- uint32_t dex_pc ATTRIBUTE_UNUSED,
- ArtMethod* callee ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(Locks::mutator_lock_) {
- received_invoke_virtual_or_interface_event = true;
- }
-
void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED, const ShadowFrame& frame ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(Locks::mutator_lock_) {
received_watched_frame_pop = true;
@@ -172,7 +162,6 @@
received_exception_thrown_event = false;
received_exception_handled_event = false;
received_branch_event = false;
- received_invoke_virtual_or_interface_event = false;
received_watched_frame_pop = false;
}
@@ -187,7 +176,6 @@
bool received_exception_thrown_event;
bool received_exception_handled_event;
bool received_branch_event;
- bool received_invoke_virtual_or_interface_event;
bool received_watched_frame_pop;
private:
@@ -382,8 +370,6 @@
return instr->HasExceptionHandledListeners();
case instrumentation::Instrumentation::kBranch:
return instr->HasBranchListeners();
- case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
- return instr->HasInvokeVirtualOrInterfaceListeners();
case instrumentation::Instrumentation::kWatchedFramePop:
return instr->HasWatchedFramePopListeners();
default:
@@ -434,9 +420,6 @@
case instrumentation::Instrumentation::kBranch:
instr->Branch(self, method, dex_pc, -1);
break;
- case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
- instr->InvokeVirtualOrInterface(self, obj, method, dex_pc, method);
- break;
case instrumentation::Instrumentation::kWatchedFramePop:
instr->WatchedFramePopped(self, frame);
break;
@@ -477,8 +460,6 @@
return listener.received_exception_handled_event;
case instrumentation::Instrumentation::kBranch:
return listener.received_branch_event;
- case instrumentation::Instrumentation::kInvokeVirtualOrInterface:
- return listener.received_invoke_virtual_or_interface_event;
case instrumentation::Instrumentation::kWatchedFramePop:
return listener.received_watched_frame_pop;
default:
@@ -636,10 +617,6 @@
TestEvent(instrumentation::Instrumentation::kBranch);
}
-TEST_F(InstrumentationTest, InvokeVirtualOrInterfaceEvent) {
- TestEvent(instrumentation::Instrumentation::kInvokeVirtualOrInterface);
-}
-
TEST_F(InstrumentationTest, DeoptimizeDirectMethod) {
ScopedObjectAccess soa(Thread::Current());
jobject class_loader = LoadDex("Instrumentation");
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
new file mode 100644
index 0000000..8c7fb42
--- /dev/null
+++ b/runtime/intern_table-inl.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERN_TABLE_INL_H_
+#define ART_RUNTIME_INTERN_TABLE_INL_H_
+
+#include "intern_table.h"
+
+// Required for ToModifiedUtf8 below.
+#include "mirror/string-inl.h"
+
+namespace art {
+
+template <typename Visitor>
+inline void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space,
+ const Visitor& visitor) {
+ DCHECK(image_space != nullptr);
+ // Only add if we have the interned strings section.
+ const ImageSection& section = image_space->GetImageHeader().GetInternedStringsSection();
+ if (section.Size() > 0) {
+ AddTableFromMemory(image_space->Begin() + section.Offset(), visitor);
+ }
+}
+
+template <typename Visitor>
+inline size_t InternTable::AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor) {
+ size_t read_count = 0;
+ UnorderedSet set(ptr, /*make copy*/false, &read_count);
+ // Visit the unordered set; the visitor may remove elements.
+ visitor(set);
+ if (!set.empty()) {
+ MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+ strong_interns_.AddInternStrings(std::move(set));
+ }
+ return read_count;
+}
+
+inline void InternTable::Table::AddInternStrings(UnorderedSet&& intern_strings) {
+ static constexpr bool kCheckDuplicates = kIsDebugBuild;
+ if (kCheckDuplicates) {
+ // Avoid doing read barriers since the space might not yet be added to the heap.
+ // See b/117803941
+ for (GcRoot<mirror::String>& string : intern_strings) {
+ CHECK(Find(string.Read<kWithoutReadBarrier>()) == nullptr)
+ << "Already found " << string.Read<kWithoutReadBarrier>()->ToModifiedUtf8()
+ << " in the intern table";
+ }
+ }
+ // Insert at the front since we add new interns into the back.
+ tables_.insert(tables_.begin(), std::move(intern_strings));
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_INTERN_TABLE_INL_H_
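
For illustration, a hypothetical caller-side visitor for the new AddImageStringsToTable: per the header change further down, the visitor receives the deserialized UnorderedSet and may drop entries before they are spliced into strong_interns_. Lock annotations are omitted, the counting body is made up, and 'image_space' is assumed to be a gc::space::ImageSpace* obtained elsewhere; the real caller lives in ImageSpace and is not part of this file.

    // Sketch only; assumes the usual runtime headers are included.
    size_t image_string_count = 0;
    auto visitor = [&](InternTable::UnorderedSet& set) {
      // Inspect (or prune) the strings deserialized from the image's intern
      // section before they are added to the runtime's strong intern table.
      for (GcRoot<mirror::String>& root : set) {
        static_cast<void>(root);
        ++image_string_count;
      }
    };
    Runtime::Current()->GetInternTable()->AddImageStringsToTable(image_space, visitor);
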
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index c8aaa21..6fbfbdd 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -177,18 +177,6 @@
RemoveWeak(s);
}
-void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) {
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (gc::space::ImageSpace* image_space : image_spaces) {
- const ImageHeader* const header = &image_space->GetImageHeader();
- // Check if we have the interned strings section.
- const ImageSection& section = header->GetInternedStringsSection();
- if (section.Size() > 0) {
- AddTableFromMemoryLocked(image_space->Begin() + section.Offset());
- }
- }
-}
-
void InternTable::BroadcastForNewInterns() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::intern_table_lock_);
@@ -303,15 +291,6 @@
weak_interns_.SweepWeaks(visitor);
}
-size_t InternTable::AddTableFromMemory(const uint8_t* ptr) {
- MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- return AddTableFromMemoryLocked(ptr);
-}
-
-size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) {
- return strong_interns_.AddTableFromMemory(ptr);
-}
-
size_t InternTable::WriteToMemory(uint8_t* ptr) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
return strong_interns_.WriteToMemory(ptr);
@@ -363,25 +342,6 @@
}
}
-size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
- size_t read_count = 0;
- UnorderedSet set(ptr, /*make copy*/false, &read_count);
- if (set.empty()) {
- // Avoid inserting empty sets.
- return read_count;
- }
- // TODO: Disable this for app images if app images have intern tables.
- static constexpr bool kCheckDuplicates = kIsDebugBuild;
- if (kCheckDuplicates) {
- for (GcRoot<mirror::String>& string : set) {
- CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8();
- }
- }
- // Insert at the front since we add new interns into the back.
- tables_.insert(tables_.begin(), std::move(set));
- return read_count;
-}
-
size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
if (tables_.empty()) {
return 0;
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 5ba3e18..1bc89a1 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -59,6 +59,54 @@
*/
class InternTable {
public:
+ // Modified UTF-8-encoded string treated as UTF16.
+ class Utf8String {
+ public:
+ Utf8String(uint32_t utf16_length, const char* utf8_data, int32_t hash)
+ : hash_(hash), utf16_length_(utf16_length), utf8_data_(utf8_data) { }
+
+ int32_t GetHash() const { return hash_; }
+ uint32_t GetUtf16Length() const { return utf16_length_; }
+ const char* GetUtf8Data() const { return utf8_data_; }
+
+ private:
+ int32_t hash_;
+ uint32_t utf16_length_;
+ const char* utf8_data_;
+ };
+
+ class StringHashEquals {
+ public:
+ std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
+ bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
+ NO_THREAD_SAFETY_ANALYSIS;
+
+ // Utf8String can be used for lookup.
+ std::size_t operator()(const Utf8String& key) const {
+ // A cast to prevent undesired sign extension.
+ return static_cast<uint32_t>(key.GetHash());
+ }
+
+ bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
+ NO_THREAD_SAFETY_ANALYSIS;
+ };
+
+ class GcRootEmptyFn {
+ public:
+ void MakeEmpty(GcRoot<mirror::String>& item) const {
+ item = GcRoot<mirror::String>();
+ }
+ bool IsEmpty(const GcRoot<mirror::String>& item) const {
+ return item.IsNull();
+ }
+ };
+
+ using UnorderedSet = HashSet<GcRoot<mirror::String>,
+ GcRootEmptyFn,
+ StringHashEquals,
+ StringHashEquals,
+ TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>>;
+
InternTable();
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
@@ -119,10 +167,12 @@
void BroadcastForNewInterns();
- // Adds all of the resolved image strings from the image spaces into the intern table. The
- // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
- // images_added_to_intern_table_ to true.
- void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
+ // Add all of the strings in the image's intern table into this intern table. This is required so
+ // that later interning of the same values returns the String instances already stored in the image.
+ // The visitor is called with the image's deserialized UnorderedSet and may remove elements first.
+ template <typename Visitor>
+ void AddImageStringsToTable(gc::space::ImageSpace* image_space,
+ const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Add a new intern table for inserting to, previous intern tables are still there but no
@@ -130,11 +180,6 @@
void AddNewTable()
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
- // Read the intern table from memory. The elements aren't copied, the intern hash set data will
- // point to somewhere within ptr. Only reads the strong interns.
- size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Write the post zygote intern table to a pointer. Only writes the strong interns since it is
// expected that there is no weak interns since this is called from the image writer.
size_t WriteToMemory(uint8_t* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -145,47 +190,6 @@
REQUIRES(!Locks::intern_table_lock_);
private:
- // Modified UTF-8-encoded string treated as UTF16.
- class Utf8String {
- public:
- Utf8String(uint32_t utf16_length, const char* utf8_data, int32_t hash)
- : hash_(hash), utf16_length_(utf16_length), utf8_data_(utf8_data) { }
-
- int32_t GetHash() const { return hash_; }
- uint32_t GetUtf16Length() const { return utf16_length_; }
- const char* GetUtf8Data() const { return utf8_data_; }
-
- private:
- int32_t hash_;
- uint32_t utf16_length_;
- const char* utf8_data_;
- };
-
- class StringHashEquals {
- public:
- std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
- bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
- NO_THREAD_SAFETY_ANALYSIS;
-
- // Utf8String can be used for lookup.
- std::size_t operator()(const Utf8String& key) const {
- // A cast to prevent undesired sign extension.
- return static_cast<uint32_t>(key.GetHash());
- }
-
- bool operator()(const GcRoot<mirror::String>& a, const Utf8String& b) const
- NO_THREAD_SAFETY_ANALYSIS;
- };
- class GcRootEmptyFn {
- public:
- void MakeEmpty(GcRoot<mirror::String>& item) const {
- item = GcRoot<mirror::String>();
- }
- bool IsEmpty(const GcRoot<mirror::String>& item) const {
- return item.IsNull();
- }
- };
-
// Table which holds pre zygote and post zygote interned strings. There is one instance for
// weak interns and strong interns.
class Table {
@@ -209,24 +213,28 @@
// Read and add an intern table from ptr.
// Tables read are inserted at the front of the table array. Only checks for conflicts in
// debug builds. Returns how many bytes were read.
- size_t AddTableFromMemory(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ // NO_THREAD_SAFETY_ANALYSIS for the visitor that may require locks.
+ template <typename Visitor>
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Write the intern tables to ptr, if there are multiple tables they are combined into a single
// one. Returns how many bytes were written.
size_t WriteToMemory(uint8_t* ptr)
REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
private:
- typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
- TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
-
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ // Add a table to the front of the tables vector.
+ void AddInternStrings(UnorderedSet&& intern_strings)
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of table is modified when strings are interned.
std::vector<UnorderedSet> tables_;
+ friend class InternTable;
friend class linker::ImageWriter;
ART_FRIEND_TEST(InternTableTest, CrossHash);
};
@@ -237,6 +245,11 @@
ObjPtr<mirror::String> Insert(ObjPtr<mirror::String> s, bool is_strong, bool holding_locks)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Add a table from memory to the strong interns.
+ template <typename Visitor>
+ size_t AddTableFromMemory(const uint8_t* ptr, const Visitor& visitor)
+ REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+
ObjPtr<mirror::String> LookupStrongLocked(ObjPtr<mirror::String> s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
ObjPtr<mirror::String> LookupWeakLocked(ObjPtr<mirror::String> s)
@@ -260,9 +273,6 @@
void RemoveWeakFromTransaction(ObjPtr<mirror::String> s)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
- size_t AddTableFromMemoryLocked(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
// Change the weak root state. May broadcast to waiters.
void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
REQUIRES(Locks::intern_table_lock_);
@@ -287,6 +297,7 @@
// Weak root state, used for concurrent system weak processing and more.
gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
+ friend class gc::space::ImageSpace;
friend class linker::ImageWriter;
friend class Transaction;
ART_FRIEND_TEST(InternTableTest, CrossHash);
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 8b4fe44..b3bf1ba 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -78,7 +78,7 @@
GcRoot<mirror::String> str(mirror::String::AllocFromModifiedUtf8(soa.Self(), "00000000"));
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
- for (InternTable::Table::UnorderedSet& table : t.strong_interns_.tables_) {
+ for (InternTable::UnorderedSet& table : t.strong_interns_.tables_) {
// The negative hash value shall be 32-bit wide on every host.
ASSERT_TRUE(IsUint<32>(table.hashfn_(str)));
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 048c6e4..2ae95dc 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -56,7 +56,7 @@
ScopedObjectAccessUnchecked soa(self);
if (method->IsStatic()) {
if (shorty == "L") {
- typedef jobject (fntype)(JNIEnv*, jclass);
+ using fntype = jobject(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -67,35 +67,35 @@
}
result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "V") {
- typedef void (fntype)(JNIEnv*, jclass);
+ using fntype = void(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get());
} else if (shorty == "Z") {
- typedef jboolean (fntype)(JNIEnv*, jclass);
+ using fntype = jboolean(JNIEnv*, jclass);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get()));
} else if (shorty == "BI") {
- typedef jbyte (fntype)(JNIEnv*, jclass, jint);
+ using fntype = jbyte(JNIEnv*, jclass, jint);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetB(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "II") {
- typedef jint (fntype)(JNIEnv*, jclass, jint);
+ using fntype = jint(JNIEnv*, jclass, jint);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0]));
} else if (shorty == "LL") {
- typedef jobject (fntype)(JNIEnv*, jclass, jobject);
+ using fntype = jobject(JNIEnv*, jclass, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -108,14 +108,14 @@
}
result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "IIZ") {
- typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
+ using fntype = jint(JNIEnv*, jclass, jint, jboolean);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "ILI") {
- typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
+ using fntype = jint(JNIEnv*, jclass, jobject, jint);
fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
@@ -125,7 +125,7 @@
ScopedThreadStateChange tsc(self, kNative);
result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
} else if (shorty == "SIZ") {
- typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
+ using fntype = jshort(JNIEnv*, jclass, jint, jboolean);
fntype* const fn =
reinterpret_cast<fntype*>(const_cast<void*>(method->GetEntryPointFromJni()));
ScopedLocalRef<jclass> klass(soa.Env(),
@@ -133,14 +133,14 @@
ScopedThreadStateChange tsc(self, kNative);
result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
} else if (shorty == "VIZ") {
- typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
+ using fntype = void(JNIEnv*, jclass, jint, jboolean);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], args[1]);
} else if (shorty == "ZLL") {
- typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
+ using fntype = jboolean(JNIEnv*, jclass, jobject, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -151,7 +151,7 @@
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
} else if (shorty == "ZILL") {
- typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
+ using fntype = jboolean(JNIEnv*, jclass, jint, jobject, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -162,7 +162,7 @@
ScopedThreadStateChange tsc(self, kNative);
result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
} else if (shorty == "VILII") {
- typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
+ using fntype = void(JNIEnv*, jclass, jint, jobject, jint, jint);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -171,7 +171,7 @@
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
} else if (shorty == "VLILII") {
- typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
+ using fntype = void(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jclass> klass(soa.Env(),
soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
@@ -187,7 +187,7 @@
}
} else {
if (shorty == "L") {
- typedef jobject (fntype)(JNIEnv*, jobject);
+ using fntype = jobject(JNIEnv*, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
@@ -198,14 +198,14 @@
}
result->SetL(soa.Decode<mirror::Object>(jresult));
} else if (shorty == "V") {
- typedef void (fntype)(JNIEnv*, jobject);
+ using fntype = void(JNIEnv*, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
ScopedThreadStateChange tsc(self, kNative);
fn(soa.Env(), rcvr.get());
} else if (shorty == "LL") {
- typedef jobject (fntype)(JNIEnv*, jobject, jobject);
+ using fntype = jobject(JNIEnv*, jobject, jobject);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
@@ -219,7 +219,7 @@
result->SetL(soa.Decode<mirror::Object>(jresult));
ScopedThreadStateChange tsc(self, kNative);
} else if (shorty == "III") {
- typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
+ using fntype = jint(JNIEnv*, jobject, jint, jint);
fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
ScopedLocalRef<jobject> rcvr(soa.Env(),
soa.AddLocalReference<jobject>(receiver));
@@ -261,6 +261,12 @@
shadow_frame.GetThisObject(accessor.InsSize()),
method,
0);
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ // The caller will retry this invoke. Just return immediately without any value.
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ DCHECK(PrevFrameWillRetry(self, shadow_frame));
+ return JValue();
+ }
if (UNLIKELY(self->IsExceptionPending())) {
instrumentation->MethodUnwindEvent(self,
shadow_frame.GetThisObject(accessor.InsSize()),
@@ -494,8 +500,8 @@
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
- // Are we executing the first shadow frame?
- bool first = true;
+ // The number of frames we have executed so far.
+ size_t frame_cnt = 0;
while (shadow_frame != nullptr) {
// We do not want to recover lock state for lock counting when deoptimizing. Currently,
// the compiler should not have compiled a method that failed structured-locking checks.
@@ -510,24 +516,30 @@
// the instrumentation. To prevent from reporting it a second time, we simply pass a
// null Instrumentation*.
const instrumentation::Instrumentation* const instrumentation =
- first ? nullptr : Runtime::Current()->GetInstrumentation();
+ frame_cnt == 0 ? nullptr : Runtime::Current()->GetInstrumentation();
new_dex_pc = MoveToExceptionHandler(
self, *shadow_frame, instrumentation) ? shadow_frame->GetDexPC() : dex::kDexNoIndex;
} else if (!from_code) {
// Deoptimization is not called from code directly.
const Instruction* instr = &accessor.InstructionAt(dex_pc);
- if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc) {
- DCHECK(first);
+ if (deopt_method_type == DeoptimizationMethodType::kKeepDexPc ||
+ shadow_frame->GetForceRetryInstruction()) {
+ DCHECK(frame_cnt == 0 || (frame_cnt == 1 && shadow_frame->GetForceRetryInstruction()))
+ << "frame_cnt: " << frame_cnt
+ << " force-retry: " << shadow_frame->GetForceRetryInstruction();
// Need to re-execute the dex instruction.
// (1) An invocation might be split into class initialization and invoke.
// In this case, the invoke should not be skipped.
// (2) A suspend check should also execute the dex instruction at the
// corresponding dex pc.
+ // If the ForceRetryInstruction bit is set, this must be the second frame (the first being
+ // the one that is being popped).
DCHECK_EQ(new_dex_pc, dex_pc);
+ shadow_frame->SetForceRetryInstruction(false);
} else if (instr->Opcode() == Instruction::MONITOR_ENTER ||
instr->Opcode() == Instruction::MONITOR_EXIT) {
DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
- DCHECK(first);
+ DCHECK_EQ(frame_cnt, 0u);
// Non-idempotent dex instruction should not be re-executed.
// On the other hand, if a MONITOR_ENTER is at the dex_pc of a suspend
// check, that MONITOR_ENTER should be executed. That case is handled
@@ -553,7 +565,7 @@
DCHECK_EQ(new_dex_pc, dex_pc);
} else {
DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
- DCHECK(first);
+ DCHECK_EQ(frame_cnt, 0u);
// By default, we re-execute the dex instruction since if they are not
// an invoke, so that we don't have to decode the dex instruction to move
// result into the right vreg. All slow paths have been audited to be
@@ -566,7 +578,7 @@
} else {
// Nothing to do, the dex_pc is the one at which the code requested
// the deoptimization.
- DCHECK(first);
+ DCHECK_EQ(frame_cnt, 0u);
DCHECK_EQ(new_dex_pc, dex_pc);
}
if (new_dex_pc != dex::kDexNoIndex) {
@@ -585,7 +597,7 @@
// and should advance dex pc past the invoke instruction.
from_code = false;
deopt_method_type = DeoptimizationMethodType::kDefault;
- first = false;
+ frame_cnt++;
}
ret_val->SetJ(value.GetJ());
}
@@ -657,5 +669,18 @@
InitMterpTls(self);
}
+bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame) {
+ ShadowFrame* prev_frame = frame.GetLink();
+ if (prev_frame == nullptr) {
+ NthCallerVisitor vis(self, 1, false);
+ vis.WalkStack();
+ prev_frame = vis.GetCurrentShadowFrame();
+ if (prev_frame == nullptr) {
+ prev_frame = self->FindDebuggerShadowFrame(vis.GetFrameId());
+ }
+ }
+ return prev_frame != nullptr && prev_frame->GetForceRetryInstruction();
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 0d43b90..d7e69a6 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -69,6 +69,12 @@
void InitInterpreterTls(Thread* self);
+// Returns true if the previous frame has the ForceRetryInstruction bit set. This is required for
+// ForcePopFrame to work correctly, since that will cause the Java function to return with null/0,
+// which might not be expected by the code being run.
+bool PrevFrameWillRetry(Thread* self, const ShadowFrame& frame)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_cache.cc b/runtime/interpreter/interpreter_cache.cc
new file mode 100644
index 0000000..e43fe31
--- /dev/null
+++ b/runtime/interpreter/interpreter_cache.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter_cache.h"
+#include "thread-inl.h"
+
+namespace art {
+
+void InterpreterCache::Clear(Thread* owning_thread) {
+ DCHECK(owning_thread->GetInterpreterCache() == this);
+ DCHECK(owning_thread == Thread::Current() || owning_thread->IsSuspended());
+ data_.fill(Entry{});
+}
+
+bool InterpreterCache::IsCalledFromOwningThread() {
+ return Thread::Current()->GetInterpreterCache() == this;
+}
+
+} // namespace art
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
new file mode 100644
index 0000000..355058f
--- /dev/null
+++ b/runtime/interpreter/interpreter_cache.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
+
+#include <array>
+#include <atomic>
+
+#include "base/bit_utils.h"
+#include "base/macros.h"
+
+namespace art {
+
+class Thread;
+
+// Small fast thread-local cache for the interpreter.
+// It can hold arbitrary pointer-sized key-value pair.
+// The interpretation of the value depends on the key.
+// The presence of an entry might imply some pre-conditions.
+// All operations must be done from the owning thread,
+// or at a point when the owning thread is suspended.
+//
+// The key-value pairs stored in the cache currently are:
+// iget/iput: The field offset. The field must be non-volatile.
+// sget/sput: The ArtField* pointer. The field must be non-volatile.
+// invoke: The ArtMethod* pointer (before vtable indirection, etc).
+// ArtMethod*: The ImtIndex of the method.
+//
+// We ensure consistency of the cache by clearing it
+// whenever any dex file is unloaded.
+//
+// Aligned to 16 bytes to make it easier to get the address of the cache
+// from assembly (it ensures that the offset is a valid immediate value).
+class ALIGNED(16) InterpreterCache {
+ // Aligned since we load the whole entry in single assembly instruction.
+ typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
+
+ public:
+ // 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
+ // A size of 256 gives around a 75% cache hit rate.
+ static constexpr size_t kSize = 256;
+
+ InterpreterCache() {
+ // We cannot use the Clear() method since the constructor will not
+ // be called from the owning thread.
+ data_.fill(Entry{});
+ }
+
+ // Clear the whole cache. It requires the owning thread for DCHECKs.
+ void Clear(Thread* owning_thread);
+
+ ALWAYS_INLINE bool Get(const void* key, /* out */ size_t* value) {
+ DCHECK(IsCalledFromOwningThread());
+ Entry& entry = data_[IndexOf(key)];
+ if (LIKELY(entry.first == key)) {
+ *value = entry.second;
+ return true;
+ }
+ return false;
+ }
+
+ ALWAYS_INLINE void Set(const void* key, size_t value) {
+ DCHECK(IsCalledFromOwningThread());
+ data_[IndexOf(key)] = Entry{key, value};
+ }
+
+ private:
+ bool IsCalledFromOwningThread();
+
+ static ALWAYS_INLINE size_t IndexOf(const void* key) {
+ static_assert(IsPowerOfTwo(kSize), "Size must be power of two");
+ size_t index = (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
+ DCHECK_LT(index, kSize);
+ return index;
+ }
+
+ std::array<Entry, kSize> data_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_CACHE_H_
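
A sketch of how the cache is meant to be used, mirroring the DoInvoke change later in this patch: the dex instruction pointer is the key and the resolved ArtMethod* is the value, with a slow path filling the cache on a miss. ResolveMethodSlowPath is a hypothetical stand-in for the ClassLinker call; assume the usual runtime headers are included.

    ArtMethod* LookupOrResolve(Thread* self, const Instruction* inst) {
      InterpreterCache* cache = self->GetInterpreterCache();
      size_t cached_value;
      if (LIKELY(cache->Get(inst, &cached_value))) {
        return reinterpret_cast<ArtMethod*>(cached_value);   // Hit: reuse the resolved method.
      }
      ArtMethod* method = ResolveMethodSlowPath(self, inst);  // Hypothetical slow path.
      if (method != nullptr) {
        cache->Set(inst, reinterpret_cast<size_t>(method));   // Fill the per-thread cache.
      }
      return method;
    }
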
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 92d4731..b170232 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
#include <cmath>
+#include "base/casts.h"
#include "base/enums.h"
#include "class_root.h"
#include "debugger.h"
@@ -371,6 +372,12 @@
if (UNLIKELY(self->IsExceptionPending())) {
return false;
}
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+ // Don't actually set the field. The next instruction will force us to pop.
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ DCHECK(PrevFrameWillRetry(self, shadow_frame));
+ return true;
+ }
}
// Note: iput-x-quick instructions are only for non-volatile fields.
switch (field_type) {
@@ -440,6 +447,11 @@
self->IsExceptionThrownByCurrentMethod(exception.Get())) {
// See b/65049545 for why we don't need to check to see if the exception has changed.
instrumentation->ExceptionThrownEvent(self, exception.Get());
+ if (shadow_frame.GetForcePopFrame()) {
+ // The caller will check GetForcePopFrame again. We need to bail out early to
+ // prevent an ExceptionHandledEvent from also being sent before popping.
+ return true;
+ }
}
bool clear_exception = false;
uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(
@@ -584,10 +596,10 @@
for (uint32_t i = 0, e = shadow_frame->NumberOfVRegs(); i < e; ++i) {
if (shadow_frame->GetVRegReference(i) == existing) {
DCHECK_EQ(shadow_frame->GetVRegReference(i),
- reinterpret_cast<mirror::Object*>(shadow_frame->GetVReg(i)));
+ reinterpret_cast32<mirror::Object*>(shadow_frame->GetVReg(i)));
shadow_frame->SetVRegReference(i, result.GetL());
DCHECK_EQ(shadow_frame->GetVRegReference(i),
- reinterpret_cast<mirror::Object*>(shadow_frame->GetVReg(i)));
+ reinterpret_cast32<mirror::Object*>(shadow_frame->GetVReg(i)));
}
}
}
@@ -1445,7 +1457,7 @@
// If both register locations contains the same value, the register probably holds a reference.
// Note: As an optimization, non-moving collectors leave a stale reference value
// in the references array even after the original vreg was overwritten to a non-reference.
- if (src_value == reinterpret_cast<uintptr_t>(o.Ptr())) {
+ if (src_value == reinterpret_cast32<uint32_t>(o.Ptr())) {
new_shadow_frame->SetVRegReference(dest_reg, o);
} else {
new_shadow_frame->SetVReg(dest_reg, src_value);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index b324b4c..26bfba9 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -121,92 +121,78 @@
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
-// Handles streamlined non-range invoke static, direct and virtual instructions originating in
-// mterp. Access checks and instrumentation other than jit profiling are not supported, but does
-// support interpreter intrinsics if applicable.
-// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type>
-static inline bool DoFastInvoke(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result) {
- const uint32_t method_idx = inst->VRegB_35c();
- const uint32_t vregC = inst->VRegC_35c();
- ObjPtr<mirror::Object> receiver = (type == kStatic)
- ? nullptr
- : shadow_frame.GetVRegReference(vregC);
- ArtMethod* sf_method = shadow_frame.GetMethod();
- ArtMethod* const called_method = FindMethodFromCode<type, false>(
- method_idx, &receiver, sf_method, self);
- // The shadow frame should already be pushed, so we don't need to update it.
- if (UNLIKELY(called_method == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- } else if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
- result->SetJ(0);
- return false;
- } else {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && type == kVirtual) {
- jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
- }
- if (called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- return !self->IsExceptionPending();
- }
- }
- return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
- }
-}
-
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check>
-static inline bool DoInvoke(Thread* self,
- ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result) {
+template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
+static ALWAYS_INLINE bool DoInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure to check for async exceptions before anything else.
if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ ArtMethod* sf_method = shadow_frame.GetMethod();
+
+ // Try to find the method in small thread-local cache first.
+ InterpreterCache* tls_cache = self->GetInterpreterCache();
+ size_t tls_value;
+ ArtMethod* resolved_method;
+ if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+ resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
+ } else {
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ constexpr ClassLinker::ResolveMode resolve_mode =
+ do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
+ : ClassLinker::ResolveMode::kNoChecks;
+ resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
+ if (UNLIKELY(resolved_method == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ }
+ tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+ }
+
+ // Null pointer check and virtual method resolution.
ObjPtr<mirror::Object> receiver =
(type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- ArtMethod* sf_method = shadow_frame.GetMethod();
- ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
- method_idx, &receiver, sf_method, self);
+ ArtMethod* const called_method = FindMethodToCall<type, do_access_check>(
+ method_idx, resolved_method, &receiver, sf_method, self);
+
// The shadow frame should already be pushed, so we don't need to update it.
if (UNLIKELY(called_method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
- } else if (UNLIKELY(!called_method->IsInvokable())) {
+ }
+ if (UNLIKELY(!called_method->IsInvokable())) {
called_method->ThrowInvocationTimeError();
result->SetJ(0);
return false;
- } else {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && (type == kVirtual || type == kInterface)) {
- jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
- }
- // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
- if (type == kVirtual || type == kInterface) {
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
- instrumentation->InvokeVirtualOrInterface(
- self, receiver.Ptr(), sf_method, shadow_frame.GetDexPC(), called_method);
- }
- }
- return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
}
+
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && (type == kVirtual || type == kInterface)) {
+ jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+
+ if (is_mterp && !is_range && called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ if (jit != nullptr && sf_method != nullptr) {
+ jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
+ }
+ return !self->IsExceptionPending();
+ }
+ }
+
+ return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
+ result);
}
static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
@@ -277,7 +263,8 @@
template<bool is_range>
static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data,
- JValue* result) {
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
ObjPtr<mirror::Object> const receiver = shadow_frame.GetVRegReference(vregC);
if (UNLIKELY(receiver == nullptr)) {
@@ -305,12 +292,6 @@
receiver, shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
jit->AddSamples(self, shadow_frame.GetMethod(), 1, /*with_backedges*/false);
}
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- // TODO: Remove the InvokeVirtualOrInterface instrumentation, as it was only used by the JIT.
- if (UNLIKELY(instrumentation->HasInvokeVirtualOrInterfaceListeners())) {
- instrumentation->InvokeVirtualOrInterface(
- self, receiver.Ptr(), shadow_frame.GetMethod(), shadow_frame.GetDexPC(), called_method);
- }
// No need to check since we've been quickened.
return DoCall<is_range, false>(called_method, self, shadow_frame, inst, inst_data, result);
}
@@ -601,52 +582,6 @@
uint16_t this_obj_vreg,
JValue result);
-// Explicitly instantiate all DoInvoke functions.
-#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvoke<_type, _is_range, _do_check>(Thread* self, \
- ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
-
-#define EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(_type) \
- EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false); \
- EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, true); \
- EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, false); \
- EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, true, true);
-
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kStatic) // invoke-static/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kDirect) // invoke-direct/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kVirtual) // invoke-virtual/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kSuper) // invoke-super/range.
-EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface) // invoke-interface/range.
-#undef EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL
-#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
-
-// Explicitly instantiate all DoFastInvoke functions.
-#define EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(_type) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoFastInvoke<_type>(Thread* self, \
- ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
-
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kStatic); // invoke-static
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kDirect); // invoke-direct
-EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kVirtual); // invoke-virtual
-#undef EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL
-
-// Explicitly instantiate all DoInvokeVirtualQuick functions.
-#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
- JValue* result)
-
-EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false); // invoke-virtual-quick.
-EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true); // invoke-virtual-quick-range.
-#undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
-
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 69dae31..17b3cd4 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -116,10 +116,10 @@
UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
// java.lang.Long.rotateRight(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
// java.lang.Long.rotateLeft(JI)J
-BINARY_JJ_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
+BINARY_JI_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
// java.lang.Long.signum(J)I
UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 2762629..d9f76ee 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -17,22 +17,46 @@
#include "interpreter_switch_impl.h"
#include "base/enums.h"
+#include "base/memory_tool.h"
#include "base/quasi_atomic.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "interpreter_common.h"
#include "jit/jit.h"
#include "jvalue-inl.h"
+#include "nth_caller_visitor.h"
#include "safe_math.h"
#include "shadow_frame-inl.h"
+#include "thread.h"
namespace art {
namespace interpreter {
+#define CHECK_FORCE_RETURN() \
+ do { \
+ if (UNLIKELY(shadow_frame.GetForcePopFrame())) { \
+ DCHECK(PrevFrameWillRetry(self, shadow_frame)) \
+ << "Pop frame forced without previous frame ready to retry instruction!"; \
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled()); \
+ if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) { \
+ SendMethodExitEvents(self, \
+ instrumentation, \
+ shadow_frame, \
+ shadow_frame.GetThisObject(accessor.InsSize()), \
+ shadow_frame.GetMethod(), \
+ inst->GetDexPc(insns), \
+ JValue()); \
+ } \
+ ctx->result = JValue(); /* Handled in caller. */ \
+ return; \
+ } \
+ } while (false)
+
#define HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instr) \
do { \
DCHECK(self->IsExceptionPending()); \
self->AllowThreadSuspension(); \
+ CHECK_FORCE_RETURN(); \
if (!MoveToExceptionHandler(self, shadow_frame, instr)) { \
/* Structured locking is to be enforced for abnormal termination, too. */ \
DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); \
@@ -43,6 +67,7 @@
ctx->result = JValue(); /* Handled in caller. */ \
return; \
} else { \
+ CHECK_FORCE_RETURN(); \
int32_t displacement = \
static_cast<int32_t>(shadow_frame.GetDexPC()) - static_cast<int32_t>(dex_pc); \
inst = inst->RelativeAt(displacement); \
@@ -51,8 +76,39 @@
#define HANDLE_PENDING_EXCEPTION() HANDLE_PENDING_EXCEPTION_WITH_INSTRUMENTATION(instrumentation)
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_IMPL(_is_exception_pending, _next_function) \
+ do { \
+ if (UNLIKELY(shadow_frame.GetForceRetryInstruction())) { \
+ /* Don't need to do anything except clear the flag and exception. We leave the */ \
+ /* instruction the same so it will be re-executed on the next go-around. */ \
+ DCHECK(inst->IsInvoke()); \
+ shadow_frame.SetForceRetryInstruction(false); \
+ if (UNLIKELY(_is_exception_pending)) { \
+ DCHECK(self->IsExceptionPending()); \
+ if (kIsDebugBuild) { \
+ LOG(WARNING) << "Suppressing exception for instruction-retry: " \
+ << self->GetException()->Dump(); \
+ } \
+ self->ClearException(); \
+ } \
+ } else if (UNLIKELY(_is_exception_pending)) { \
+ /* Should have succeeded. */ \
+ DCHECK(!shadow_frame.GetForceRetryInstruction()); \
+ HANDLE_PENDING_EXCEPTION(); \
+ } else { \
+ inst = inst->_next_function(); \
+ } \
+ } while (false)
+
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(_is_exception_pending) \
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_IMPL(_is_exception_pending, Next_4xx)
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(_is_exception_pending) \
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_IMPL(_is_exception_pending, Next_3xx)
+
#define POSSIBLY_HANDLE_PENDING_EXCEPTION(_is_exception_pending, _next_function) \
do { \
+ /* Should only be on invoke instructions. */ \
+ DCHECK(!shadow_frame.GetForceRetryInstruction()); \
if (UNLIKELY(_is_exception_pending)) { \
HANDLE_PENDING_EXCEPTION(); \
} else { \
@@ -66,17 +122,22 @@
}
// Code to run before each dex instruction.
-#define PREAMBLE_SAVE(save_ref) \
+#define PREAMBLE_SAVE(save_ref) \
{ \
- if (UNLIKELY(instrumentation->HasDexPcListeners()) && \
- UNLIKELY(!DoDexPcMoveEvent(self, \
- accessor, \
- shadow_frame, \
- dex_pc, \
- instrumentation, \
- save_ref))) { \
- HANDLE_PENDING_EXCEPTION(); \
- break; \
+ /* We need to put this before & after the instrumentation to avoid having to put in a */ \
+ /* post-script macro. */ \
+ CHECK_FORCE_RETURN(); \
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) { \
+ if (UNLIKELY(!DoDexPcMoveEvent(self, \
+ accessor, \
+ shadow_frame, \
+ dex_pc, \
+ instrumentation, \
+ save_ref))) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ break; \
+ } \
+ CHECK_FORCE_RETURN(); \
} \
} \
do {} while (false)
@@ -180,7 +241,8 @@
const JValue& result)
REQUIRES_SHARED(Locks::mutator_lock_) {
bool had_event = false;
- if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
+ // We don't send method-exit if it's a pop-frame. We still send frame_popped though.
+ if (UNLIKELY(instrumentation->HasMethodExitListeners() && !frame.GetForcePopFrame())) {
had_event = true;
instrumentation->MethodExitEvent(self, thiz.Ptr(), method, dex_pc, result);
}
@@ -195,8 +257,11 @@
}
}
+// TODO On ASAN builds this function gets a huge stack frame. Since normally we run in the mterp
+// this shouldn't cause any problems for stack overflow detection. Remove this once b/117341496 is
+// fixed.
template<bool do_access_check, bool transaction_active>
-void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
+ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
Thread* self = ctx->self;
const CodeItemDataAccessor& accessor = ctx->accessor;
ShadowFrame& shadow_frame = ctx->shadow_frame;
@@ -217,6 +282,9 @@
uint16_t inst_data;
jit::Jit* jit = Runtime::Current()->GetJit();
+ DCHECK(!shadow_frame.GetForceRetryInstruction())
+ << "Entered interpreter from invoke without retry instruction being handled!";
+
do {
dex_pc = inst->GetDexPc(insns);
shadow_frame.SetDexPC(dex_pc);
@@ -1601,86 +1669,86 @@
}
case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, false, do_access_check>(
+ bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kVirtual, true, do_access_check>(
+ bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_SUPER: {
PREAMBLE();
- bool success = DoInvoke<kSuper, false, do_access_check>(
+ bool success = DoInvoke<kSuper, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kSuper, true, do_access_check>(
+ bool success = DoInvoke<kSuper, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- bool success = DoInvoke<kDirect, false, do_access_check>(
+ bool success = DoInvoke<kDirect, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kDirect, true, do_access_check>(
+ bool success = DoInvoke<kDirect, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, false, do_access_check>(
+ bool success = DoInvoke<kInterface, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kInterface, true, do_access_check>(
+ bool success = DoInvoke<kInterface, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_STATIC: {
PREAMBLE();
- bool success = DoInvoke<kStatic, false, do_access_check>(
+ bool success = DoInvoke<kStatic, false, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- bool success = DoInvoke<kStatic, true, do_access_check>(
+ bool success = DoInvoke<kStatic, true, do_access_check, /*is_mterp=*/ false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
bool success = DoInvokeVirtualQuick<false>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
bool success = DoInvokeVirtualQuick<true>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_POLYMORPHIC: {
@@ -1688,7 +1756,7 @@
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
bool success = DoInvokePolymorphic<false /* is_range */>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
}
case Instruction::INVOKE_POLYMORPHIC_RANGE: {
@@ -1696,7 +1764,7 @@
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
bool success = DoInvokePolymorphic<true /* is_range */>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_4xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
break;
}
case Instruction::INVOKE_CUSTOM: {
@@ -1704,7 +1772,7 @@
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
bool success = DoInvokeCustom<false /* is_range */>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::INVOKE_CUSTOM_RANGE: {
@@ -1712,7 +1780,7 @@
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
bool success = DoInvokeCustom<true /* is_range */>(
self, shadow_frame, inst, inst_data, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
break;
}
case Instruction::NEG_INT:
diff --git a/runtime/interpreter/mterp/Makefile_mterp b/runtime/interpreter/mterp/Makefile_mterp
deleted file mode 100644
index ac8da69..0000000
--- a/runtime/interpreter/mterp/Makefile_mterp
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Makefile for the Art fast interpreter. This is not currently
-# integrated into the build system.
-#
-
-SHELL := /bin/sh
-
-# Build system has TARGET_ARCH=arm, but we can support the exact architecture
-# if it is worthwhile.
-#
-# To generate sources:
-# for arch in arm arm64 x86 x86_64 mips mips64
-# do
-# TARGET_ARCH_EXT=$arch make -f Makefile_mterp
-# done
-#
-
-OUTPUT_DIR := out
-
-# Accumulate all possible dependencies for the generated files in a very
-# conservative fashion. If it's not one of the generated files in "out",
-# assume it's a dependency.
-SOURCE_DEPS := \
- $(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print) \
-
-# Source files generated by the script. There's always one C and one
-# assembly file, though in practice one or the other could be empty.
-GEN_SOURCES := \
- $(OUTPUT_DIR)/interp_asm_$(TARGET_ARCH_EXT).S
-
-target: $(GEN_SOURCES)
-
-$(GEN_SOURCES): $(SOURCE_DEPS)
- @mkdir -p out
- ./gen_mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)
diff --git a/runtime/interpreter/mterp/README.txt b/runtime/interpreter/mterp/README.txt
index 19e02be..54bb634 100644
--- a/runtime/interpreter/mterp/README.txt
+++ b/runtime/interpreter/mterp/README.txt
@@ -1,108 +1,29 @@
-rt "mterp" README
-
-NOTE: Find rebuilding instructions at the bottom of this file.
-
-
==== Overview ====
-Every configuration has a "config-*" file that controls how the sources
-are generated. The sources are written into the "out" directory, where
+The assembly source code is produced from custom python-based templates.
+All the architecture-specific template files are concatenated to create
+one big python script. This generated python script is then executed to
+produce the final assembly file. The template syntax is:
+ * Lines starting with % are python code. They will be copied as-is to
+ the script (without the %) and thus executed during the generation.
+ * Other lines are text, and they are essentially syntax sugar for
+ out.write('''(line text)''') and thus they write the main output.
+ * Within a text line, $ can be used to insert variables from code (see the sketch below).
+
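As a rough sketch of the scheme just described (this is not the real gen_mterp.py, whose details may differ), the translation of a template into the generator script could look like:

```python
import re

def template_to_python(template_lines):
    """Hedged sketch: turn template lines into the Python that emits assembly."""
    script = []
    for line in template_lines:
        if line.startswith("%"):
            script.append(line[1:])  # python code, copied as-is (without the %)
        else:
            # $name / ${name} splice in the value of a generator variable.
            text = re.sub(r"\$\{?(\w+)\}?", r"''' + str(\1) + '''", line)
            script.append("out.write('''" + text + "\\n''')")
    return "\n".join(script)
```

Executing the resulting script with the per-opcode variables (such as $opcode) bound then writes out the final assembly text.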
+The final assembly sources are written into the "out" directory, where
they are picked up by the Android build system.
The best way to become familiar with the interpreter is to look at the
generated files in the "out" directory.
-==== Config file format ====
-
-The config files are parsed from top to bottom. Each line in the file
-may be blank, hold a comment (line starts with '#'), or be a command.
-
-The commands are:
-
- handler-style <computed-goto|jump-table>
-
- Specify which style of interpreter to generate. In computed-goto,
- each handler is allocated a fixed region, allowing transitions to
- be done via table-start-address + (opcode * handler-size). With
- jump-table style, handlers may be of any length, and the generated
- table is an array of pointers to the handlers. This command is required,
- and must be the first command in the config file.
-
- handler-size <bytes>
-
- Specify the size of the fixed region, in bytes. On most platforms
- this will need to be a power of 2. For jump-table implementations,
- this command is ignored.
-
- import <filename>
-
- The specified file is included immediately, in its entirety. No
- substitutions are performed. ".cpp" and ".h" files are copied to the
- C output, ".S" files are copied to the asm output.
-
- asm-alt-stub <filename>
-
- When present, this command will cause the generation of an alternate
- set of entry points (for computed-goto interpreters) or an alternate
- jump table (for jump-table interpreters).
-
- fallback-stub <filename>
-
- Specifies a file to be used for the special FALLBACK tag on the "op"
- command below. Intended to be used to transfer control to an alternate
- interpreter to single-step a not-yet-implemented opcode. Note: should
- note be used on RETURN-class instructions.
-
- op-start <directory>
-
- Indicates the start of the opcode list. Must precede any "op"
- commands. The specified directory is the default location to pull
- instruction files from.
-
- op <opcode> <directory>|FALLBACK
-
- Can only appear after "op-start" and before "op-end". Overrides the
- default source file location of the specified opcode. The opcode
- definition will come from the specified file, e.g. "op OP_NOP arm"
- will load from "arm/OP_NOP.S". A substitution dictionary will be
- applied (see below). If the special "FALLBACK" token is used instead of
- a directory name, the source file specified in fallback-stub will instead
- be used for this opcode.
-
- alt <opcode> <directory>
-
- Can only appear after "op-start" and before "op-end". Similar to the
- "op" command above, but denotes a source file to override the entry
- in the alternate handler table. The opcode definition will come from
- the specified file, e.g. "alt OP_NOP arm" will load from
- "arm/ALT_OP_NOP.S". A substitution dictionary will be applied
- (see below).
-
- op-end
-
- Indicates the end of the opcode list. All kNumPackedOpcodes
- opcodes are emitted when this is seen, followed by any code that
- didn't fit inside the fixed-size instruction handler space.
-
-The order of "op" and "alt" directives are not significant; the generation
-tool will extract ordering info from the VM sources.
-
-Typically the form in which most opcodes currently exist is used in
-the "op-start" directive.
-
==== Instruction file format ====
The assembly instruction files are simply fragments of assembly sources.
The starting label will be provided by the generation tool, as will
-declarations for the segment type and alignment. The expected target
-assembler is GNU "as", but others will work (may require fiddling with
-some of the pseudo-ops emitted by the generation tool).
+declarations for the segment type and alignment.
-A substitution dictionary is applied to all opcode fragments as they are
-appended to the output. Substitutions can look like "$value" or "${value}".
-
-The dictionary always includes:
+The following global variables are generally available:
$opcode - opcode name, e.g. "OP_NOP"
$opnum - opcode number, e.g. 0 for OP_NOP
@@ -113,29 +34,6 @@
so you can take advantage of C-style comments and preprocessor directives
like "#define".
-Some generator operations are available.
-
- %include "filename" [subst-dict]
-
- Includes the file, which should look like "arm/OP_NOP.S". You can
- specify values for the substitution dictionary, using standard Python
- syntax. For example, this:
- %include "arm/unop.S" {"result":"r1"}
- would insert "arm/unop.S" at the current file position, replacing
- occurrences of "$result" with "r1".
-
- %default <subst-dict>
-
- Specify default substitution dictionary values, using standard Python
- syntax. Useful if you want to have a "base" version and variants.
-
- %break
-
- Identifies the split between the main portion of the instruction
- handler (which must fit in "handler-size" bytes) and the "sister"
- code, which is appended to the end of the instruction handler block.
- In jump table implementations, %break is ignored.
-
The generation tool does *not* print a warning if your instructions
exceed "handler-size", but the VM will abort on startup if it detects an
oversized handler. On architectures with fixed-width instructions this
@@ -153,20 +51,6 @@
message and abort during startup.
-==== Development tips ====
-
-If you need to debug the initial piece of an opcode handler, and your
-debug code expands it beyond the handler size limit, you can insert a
-generic header at the top:
-
- b ${opcode}_start
-%break
-${opcode}_start:
-
-If you already have a %break, it's okay to leave it in place -- the second
-%break is ignored.
-
-
==== Rebuilding ====
If you change any of the source file fragments, you need to rebuild the
@@ -174,7 +58,7 @@
"out" are editable, then:
$ cd mterp
- $ ./rebuild.sh
+ $ ./gen_mterp.py
The ultimate goal is to have the build system generate the necessary
output files without requiring this separate step, but we're not yet
diff --git a/runtime/interpreter/mterp/arm/alt_stub.S b/runtime/interpreter/mterp/arm/alt_stub.S
deleted file mode 100644
index 8799d95..0000000
--- a/runtime/interpreter/mterp/arm/alt_stub.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_${opcode}
- sub lr, lr, #(.L_ALT_${opcode} - .L_${opcode}) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
new file mode 100644
index 0000000..6413b63
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/arithmetic.S
@@ -0,0 +1,975 @@
+%def binop(preinstr="", result="r0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+%def binop2addr(preinstr="", result="r0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ $preinstr @ optional op; may set condition codes
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+%def binopLit16(result="r0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ .if $chkzero
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+%def binopLit8(extract="asr r1, r3, #8", result="r0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * You can override "extract" if the extraction of the literal value
+ * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ $extract @ optional; typically r1<- ssssssCC (sign extended)
+ .if $chkzero
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+ $instr @ $result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG $result, r9 @ vAA<- $result
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+%def binopWide(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov rINST, rINST, lsr #8 @ rINST<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 14-17 instructions */
+
+%def binopWide2addr(preinstr="", result0="r0", result1="r1", chkzero="0", instr=""):
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if $chkzero
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ result<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 12-15 instructions */
+
+%def unop(preinstr="", instr=""):
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r3 @ r0<- vB
+ $preinstr @ optional op; may set condition codes
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 8-9 instructions */
+
+%def unopNarrower(preinstr="", instr=""):
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for op_move.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
+
+%def unopWide(preinstr="", instr=""):
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $preinstr @ optional op; may set condition codes
+ $instr @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-11 instructions */
+
+%def unopWider(preinstr="", instr=""):
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
+ GET_VREG r0, r3 @ r0<- vB
+ VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
+ $preinstr @ optional op; may set condition codes
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 9-10 instructions */
+
+%def op_add_int():
+% binop(instr="add r0, r0, r1")
+
+%def op_add_int_2addr():
+% binop2addr(instr="add r0, r0, r1")
+
+%def op_add_int_lit16():
+% binopLit16(instr="add r0, r0, r1")
+
+%def op_add_int_lit8():
+% binopLit8(extract="", instr="add r0, r0, r3, asr #8")
+
+%def op_add_long():
+% binopWide(preinstr="adds r0, r0, r2", instr="adc r1, r1, r3")
+
+%def op_add_long_2addr():
+% binopWide2addr(preinstr="adds r0, r0, r2", instr="adc r1, r1, r3")
+
+%def op_and_int():
+% binop(instr="and r0, r0, r1")
+
+%def op_and_int_2addr():
+% binop2addr(instr="and r0, r0, r1")
+
+%def op_and_int_lit16():
+% binopLit16(instr="and r0, r0, r1")
+
+%def op_and_int_lit8():
+% binopLit8(extract="", instr="and r0, r0, r3, asr #8")
+
+%def op_and_long():
+% binopWide(preinstr="and r0, r0, r2", instr="and r1, r1, r3")
+
+%def op_and_long_2addr():
+% binopWide2addr(preinstr="and r0, r0, r2", instr="and r1, r1, r3")
+
+%def op_cmp_long():
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r0, r2
+ sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
+ mov ip, #0
+ mvnlt ip, #0 @ -1
+ cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
+ orrne ip, #1
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG ip, r9 @ vAA<- ip
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
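The cmp/sbcs pair above sets the condition codes for a signed 64-bit "less than" without materialising the full difference, and the cmpeq/orrne pair then fixes up equality. A small Python model of the intended result, assuming the register layout implied by the ldmia comments (low word first in each pair, high word signed, low word unsigned):

```python
def cmp_long(b_lo, b_hi, c_lo, c_hi):
    """Return -1, 0 or 1 for a signed 64-bit compare of vBB against vCC."""
    if (b_hi, b_lo) == (c_hi, c_lo):
        return 0
    # Signed compare of the high words; on a tie, unsigned compare of the
    # low words -- the same ordering the borrow from cmp/sbcs encodes.
    less = b_hi < c_hi or (b_hi == c_hi and b_lo < c_lo)
    return -1 if less else 1
```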
+%def op_div_int():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
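Whichever path is taken (the hardware sdiv or the __aeabi_idiv helper), the Java-level operation being implemented is 32-bit signed division truncating toward zero; division by zero never reaches it because the handler branches to common_errDivideByZero first, and INT_MIN / -1 (the case the binop comment mentions) wraps back to INT_MIN per the Java definition. A sketch of those semantics:

```python
INT_MIN = -2**31

def div_int(vbb, vcc):
    """Sketch of div-int semantics: truncate toward zero, wrap the overflow."""
    if vcc == 0:
        raise ZeroDivisionError  # the handler branches to common_errDivideByZero
    q = abs(vbb) // abs(vcc)
    if (vbb < 0) != (vcc < 0):
        q = -q
    # The only result outside the 32-bit range is INT_MIN // -1.
    return ((q - INT_MIN) % 2**32) + INT_MIN
```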
+%def op_div_int_2addr():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+%def op_div_int_lit16():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+%def op_div_int_lit8():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * div-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r0, r0, r1 @ r0<- op
+#else
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+%def op_div_long():
+% binopWide(instr="bl __aeabi_ldivmod", chkzero="1")
+
+%def op_div_long_2addr():
+% binopWide2addr(instr="bl __aeabi_ldivmod", chkzero="1")
+
+%def op_int_to_byte():
+% unop(instr="sxtb r0, r0")
+
+%def op_int_to_char():
+% unop(instr="uxth r0, r0")
+
+%def op_int_to_long():
+% unopWider(instr="mov r1, r0, asr #31")
+
+%def op_int_to_short():
+% unop(instr="sxth r0, r0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
+%def op_mul_int():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+% binop(instr="mul r0, r1, r0")
+
+%def op_mul_int_2addr():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+% binop2addr(instr="mul r0, r1, r0")
+
+%def op_mul_int_lit16():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+% binopLit16(instr="mul r0, r1, r0")
+
+%def op_mul_int_lit8():
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+% binopLit8(instr="mul r0, r1, r0")
+
+%def op_mul_long():
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r1, lr, r2, r0 @ r1/lr <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
+ CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r1-r2 } @ vAA/vAA+1<- r1/r2
+ GOTO_OPCODE ip @ jump to next instruction
+
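To make the picture in the op_mul_long comment explicit: writing vBB as $W\cdot 2^{32} + X$ and vCC as $Y\cdot 2^{32} + Z$ (high word first, matching the WX and YZ names above),

$$(W\cdot 2^{32} + X)(Y\cdot 2^{32} + Z) = WY\cdot 2^{64} + (WZ + XY)\cdot 2^{32} + XZ \equiv (WZ + XY)\cdot 2^{32} + XZ \pmod{2^{64}},$$

so the low result word is $XZ \bmod 2^{32}$ (the umull) and the high word is $(WZ + XY + \lfloor XZ/2^{32} \rfloor) \bmod 2^{32}$ (the mul, the mla, and the final add).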
+%def op_mul_long_2addr():
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See op_mul_long for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
+ VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r1, lr, r2, r0 @ r1/lr <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_neg_int():
+% unop(instr="rsb r0, r0, #0")
+
+%def op_neg_long():
+% unopWide(preinstr="rsbs r0, r0, #0", instr="rsc r1, r1, #0")
+
+%def op_not_int():
+% unop(instr="mvn r0, r0")
+
+%def op_not_long():
+% unopWide(preinstr="mvn r0, r0", instr="mvn r1, r1")
+
+%def op_or_int():
+% binop(instr="orr r0, r0, r1")
+
+%def op_or_int_2addr():
+% binop2addr(instr="orr r0, r0, r1")
+
+%def op_or_int_lit16():
+% binopLit16(instr="orr r0, r0, r1")
+
+%def op_or_int_lit8():
+% binopLit8(extract="", instr="orr r0, r0, r3, asr #8")
+
+%def op_or_long():
+% binopWide(preinstr="orr r0, r0, r2", instr="orr r1, r1, r3")
+
+%def op_or_long_2addr():
+% binopWide2addr(preinstr="orr r0, r0, r2", instr="orr r1, r1, r3")
+
+%def op_rem_int():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int
+ *
+ */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG r1, r3 @ r1<- vCC
+ GET_VREG r0, r2 @ r0<- vBB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 11-14 instructions */
+
+%def op_rem_int_2addr():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/2addr
+ *
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r1, r3 @ r1<- vB
+ GET_VREG r0, r9 @ r0<- vA
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+
+%def op_rem_int_lit16():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit16
+ *
+ */
+ FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r0, r2 @ r0<- vB
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-13 instructions */
+
+%def op_rem_int_lit8():
+ /*
+ * Specialized 32-bit binary operation
+ *
+ * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
+ * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
+ * ARMv7 CPUs that have hardware division support).
+ *
+ * NOTE: idivmod returns quotient in r0 and remainder in r1
+ *
+ * rem-int/lit8
+ *
+ */
+ FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG r0, r2 @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+
+#ifdef __ARM_ARCH_EXT_IDIV__
+ sdiv r2, r0, r1
+ mls r1, r1, r2, r0 @ r1<- op
+#else
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+#endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r1, r9 @ vAA<- r1
+ GOTO_OPCODE ip @ jump to next instruction
+ /* 10-12 instructions */
+
+%def op_rem_long():
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+% binopWide(instr="bl __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
+
+%def op_rem_long_2addr():
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+% binopWide2addr(instr="bl __aeabi_ldivmod", result0="r2", result1="r3", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+% binopLit16(instr="rsb r0, r0, r1")
+
+%def op_rsub_int_lit8():
+% binopLit8(extract="", instr="rsb r0, r0, r3, asr #8")
+
+%def op_shl_int():
+% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, asl r1")
+
+%def op_shl_int_2addr():
+% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, asl r1")
+
+%def op_shl_int_lit8():
+% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, asl r1")
+
+%def op_shl_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
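The sequence above builds a 64-bit left shift out of 32-bit pieces: for distances below 32 the high word picks up the bits that spill out of the low word, and for distances of 32 or more the conditional movpl replaces the high word outright. A hedged C sketch of the same decomposition (names are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Compose a 64-bit left shift from 32-bit halves, mirroring op_shl_long
     * (the shift distance is masked to 6 bits, as Dalvik requires). */
    static uint64_t shl_long(uint32_t lo, uint32_t hi, uint32_t dist) {
        uint32_t n = dist & 63;
        uint32_t out_hi, out_lo;
        if (n == 0) {                               /* asm relies on reg-shift-by-32 == 0 */
            out_hi = hi;
            out_lo = lo;
        } else if (n < 32) {
            out_hi = (hi << n) | (lo >> (32 - n));  /* mov/orr pair */
            out_lo = lo << n;
        } else {
            out_hi = lo << (n - 32);                /* movpl path   */
            out_lo = 0;
        }
        return ((uint64_t)out_hi << 32) | out_lo;
    }

    int main(void) {
        /* 0x0123456789abcdef << 8 == 0x23456789abcdef00 */
        printf("%016llx\n",
               (unsigned long long)shl_long(0x89abcdefu, 0x01234567u, 8));
        return 0;
    }
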
+%def op_shl_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_shr_int():
+% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, asr r1")
+
+%def op_shr_int_2addr():
+% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, asr r1")
+
+%def op_shr_int_lit8():
+% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, asr r1")
+
+%def op_shr_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_shr_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_sub_int():
+% binop(instr="sub r0, r0, r1")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="sub r0, r0, r1")
+
+%def op_sub_long():
+% binopWide(preinstr="subs r0, r0, r2", instr="sbc r1, r1, r3")
+
+%def op_sub_long_2addr():
+% binopWide2addr(preinstr="subs r0, r0, r2", instr="sbc r1, r1, r3")
+
+%def op_ushr_int():
+% binop(preinstr="and r1, r1, #31", instr="mov r0, r0, lsr r1")
+
+%def op_ushr_int_2addr():
+% binop2addr(preinstr="and r1, r1, #31", instr="mov r0, r0, lsr r1")
+
+%def op_ushr_int_lit8():
+% binopLit8(extract="ubfx r1, r3, #8, #5", instr="mov r0, r0, lsr r1")
+
+%def op_ushr_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
+ GET_VREG r2, r0 @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_ushr_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ GET_VREG r2, r3 @ r2<- vB
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
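The signed (shr) and unsigned (ushr) variants above differ only in how the high word is shifted: asr keeps the sign bit, lsr fills with zeroes, which is the >> versus >>> distinction used in the comments. A C sketch of both compositions (illustrative names only):

    #include <stdint.h>
    #include <stdio.h>

    /* 64-bit right shifts composed from 32-bit halves: shr sign-extends the
     * high word (asr), ushr zero-fills it (lsr).  Distance masked to 6 bits. */
    static uint64_t shift_right(uint32_t lo, uint32_t hi, uint32_t n, int arithmetic) {
        uint32_t out_lo, out_hi;
        n &= 63;
        if (n == 0) {
            out_lo = lo;
            out_hi = hi;
        } else if (n < 32) {
            out_lo = (lo >> n) | (hi << (32 - n));
            out_hi = arithmetic ? (uint32_t)((int32_t)hi >> n) : hi >> n;
        } else {
            out_lo = arithmetic ? (uint32_t)((int32_t)hi >> (n - 32)) : hi >> (n - 32);
            out_hi = arithmetic ? (uint32_t)((int32_t)hi >> 31) : 0;
        }
        return ((uint64_t)out_hi << 32) | out_lo;
    }

    int main(void) {
        printf("%016llx %016llx\n",
               (unsigned long long)shift_right(0, 0x80000000u, 4, 1),   /* shr  */
               (unsigned long long)shift_right(0, 0x80000000u, 4, 0));  /* ushr */
        /* prints: f800000000000000 0800000000000000 */
        return 0;
    }
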
+%def op_xor_int():
+% binop(instr="eor r0, r0, r1")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="eor r0, r0, r1")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="eor r0, r0, r1")
+
+%def op_xor_int_lit8():
+% binopLit8(extract="", instr="eor r0, r0, r3, asr #8")
+
+%def op_xor_long():
+% binopWide(preinstr="eor r0, r0, r2", instr="eor r1, r1, r3")
+
+%def op_xor_long_2addr():
+% binopWide2addr(preinstr="eor r0, r0, r2", instr="eor r1, r1, r3")
diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
new file mode 100644
index 0000000..88d89c5
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/array.S
@@ -0,0 +1,250 @@
+%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r2, r9 @ vAA<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
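Note that the single unsigned compare ("cmp r1, r3" followed by bcs) rejects both negative indices and indices at or beyond the length: reinterpreted as unsigned, a negative index is larger than any legal array length. A small C sketch of that guard (illustrative names only):

    #include <stdint.h>
    #include <stdio.h>

    /* One unsigned comparison covers both "index < 0" and "index >= length",
     * mirroring "cmp r1, r3; bcs common_errArrayIndex". */
    static int in_bounds(int32_t index, int32_t length) {
        return (uint32_t)index < (uint32_t)length;
    }

    int main(void) {
        printf("%d %d %d\n", in_bounds(0, 4), in_bounds(4, 4), in_bounds(-1, 4));
        /* prints: 1 0 0 */
        return 0;
    }
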
+%def op_aget_boolean():
+% op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ bl artAGetObjectFromMterp @ (array, index)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ PREFETCH_INST 2
+ cmp r1, #0
+ bne MterpException
+ SET_VREG_OBJECT r0, r9
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_aget_short():
+% op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
+ ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B r2, 1, 0 @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B r3, 1, 1 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r9 @ r2<- vAA
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ $store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_aput_boolean():
+% op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpAputObject
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_aput_short():
+% op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG r0, r2 @ r0<- vBB (array object)
+ GET_VREG r1, r3 @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+    strd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_array_length():
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ GET_VREG r0, r1 @ r0<- vB (object ref)
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r3, r2 @ vB<- length
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG r0, r3 @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData @ (obj, payload)
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq MterpPossibleException @ exception?
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rSELF
+ bl $helper
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ mov r3, rSELF
+ bl MterpNewArray
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/bincmp.S b/runtime/interpreter/mterp/arm/bincmp.S
deleted file mode 100644
index 8fad42f..0000000
--- a/runtime/interpreter/mterp/arm/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- b${condition} MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/binop.S b/runtime/interpreter/mterp/arm/binop.S
deleted file mode 100644
index eeb72ef..0000000
--- a/runtime/interpreter/mterp/arm/binop.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"preinstr":"", "result":"r0", "chkzero":"0"}
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm/binop2addr.S b/runtime/interpreter/mterp/arm/binop2addr.S
deleted file mode 100644
index d09a43a..0000000
--- a/runtime/interpreter/mterp/arm/binop2addr.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {"preinstr":"", "result":"r0", "chkzero":"0"}
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- $preinstr @ optional op; may set condition codes
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit16.S b/runtime/interpreter/mterp/arm/binopLit16.S
deleted file mode 100644
index 065394e..0000000
--- a/runtime/interpreter/mterp/arm/binopLit16.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"r0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if $chkzero
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S
deleted file mode 100644
index 7c9c631..0000000
--- a/runtime/interpreter/mterp/arm/binopLit8.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"extract":"asr r1, r3, #8", "result":"r0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- $extract @ optional; typically r1<- ssssssCC (sign extended)
- .if $chkzero
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- $instr @ $result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG $result, r9 @ vAA<- $result
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
deleted file mode 100644
index 4d88001..0000000
--- a/runtime/interpreter/mterp/arm/binopWide.S
+++ /dev/null
@@ -1,38 +0,0 @@
-%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if $chkzero
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
deleted file mode 100644
index bb16335..0000000
--- a/runtime/interpreter/mterp/arm/binopWide2addr.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if $chkzero
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
diff --git a/runtime/interpreter/mterp/arm/const.S b/runtime/interpreter/mterp/arm/const.S
deleted file mode 100644
index f6f8157..0000000
--- a/runtime/interpreter/mterp/arm/const.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl $helper @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
new file mode 100644
index 0000000..51832e1
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/control_flow.S
@@ -0,0 +1,209 @@
+%def bincmp(condition=""):
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform.
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r1, rINST, lsr #12 @ r1<- B
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ GET_VREG r3, r1 @ r3<- vB
+ GET_VREG r0, r0 @ r0<- vA
+ FETCH_S rINST, 1 @ rINST<- branch offset, in code units
+ cmp r0, r3 @ compare (vA, vB)
+ b${condition} MterpCommonTakenBranchNoFlags
+ cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
+ beq .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
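Each if-* opcode instantiates this template with a different condition suffix (eq, ne, lt, ge, gt, le); the fetched branch offset is in 16-bit code units, and the not-taken path simply skips the 2-unit instruction. A rough C model of one instantiation, purely illustrative (these names and the zeroed opcode byte are not part of mterp):

    #include <stdint.h>
    #include <stdio.h>

    /* Rough model of bincmp instantiated for if-lt.  pc addresses 16-bit
     * code units; vregs is the virtual register file. */
    static const uint16_t *if_lt(const uint16_t *pc, const int32_t *vregs) {
        uint32_t inst = pc[0];
        uint32_t a = (inst >> 8) & 0xf;      /* ubfx r0, rINST, #8, #4  */
        uint32_t b = inst >> 12;             /* mov  r1, rINST, lsr #12 */
        int16_t offset = (int16_t)pc[1];     /* FETCH_S rINST, 1        */
        if (vregs[a] < vregs[b])
            return pc + offset;              /* taken: offset in code units */
        return pc + 2;                       /* not taken: next instruction */
    }

    int main(void) {
        uint16_t code[2] = { (3u << 12) | (4u << 8), 5 };  /* A=4, B=3, +5 */
        int32_t v[16] = {0};
        v[4] = 1; v[3] = 2;
        printf("%d\n", (int)(if_lt(code, v) - code));      /* prints: 5 */
        return 0;
    }
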
+%def zcmp(condition=""):
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform.
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG r0, r0 @ r0<- vAA
+ FETCH_S rINST, 1 @ rINST<- branch offset, in code units
+ cmp r0, #0 @ compare (vA, 0)
+ b${condition} MterpCommonTakenBranchNoFlags
+ cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
+ beq .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_goto():
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH r0, 1 @ r0<- aaaa (lo)
+    FETCH r3, 2                         @ r3<- AAAA (hi)
+ orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa
+ b MterpCommonTakenBranch
+
+%def op_if_eq():
+% bincmp(condition="eq")
+
+%def op_if_eqz():
+% zcmp(condition="eq")
+
+%def op_if_ge():
+% bincmp(condition="ge")
+
+%def op_if_gez():
+% zcmp(condition="ge")
+
+%def op_if_gt():
+% bincmp(condition="gt")
+
+%def op_if_gtz():
+% zcmp(condition="gt")
+
+%def op_if_le():
+% bincmp(condition="le")
+
+%def op_if_lez():
+% zcmp(condition="le")
+
+%def op_if_lt():
+% bincmp(condition="lt")
+
+%def op_if_ltz():
+% zcmp(condition="lt")
+
+%def op_if_ne():
+% bincmp(condition="ne")
+
+%def op_if_nez():
+% zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH r0, 1 @ r0<- bbbb (lo)
+ FETCH r1, 2 @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG r1, r3 @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl $func @ r0<- code-unit branch offset
+ movs rINST, r0
+ b MterpCommonTakenBranch
+
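The helper receives a pointer to the switch payload plus the value in vAA and returns a branch offset in code units (falling through past the instruction when no case matches). A hedged C sketch of a packed-switch lookup, assuming the documented Dalvik packed-switch-payload layout (ident, size, first key, target table); this is not the actual MterpDoPackedSwitch implementation:

    #include <stdint.h>
    #include <string.h>

    /* packed-switch-payload: ushort ident (0x0100), ushort size,
     * int first_key, int targets[size].  The payload is only 16-bit
     * aligned, so the 32-bit fields are read with memcpy. */
    static int32_t packed_switch(const uint16_t *payload, int32_t value) {
        uint16_t size = payload[1];
        int32_t first_key, target;
        memcpy(&first_key, payload + 2, sizeof first_key);
        uint32_t index = (uint32_t)(value - first_key);
        if (index >= size)
            return 3;                       /* no match: skip the 3-unit insn */
        memcpy(&target, payload + 4 + 2 * index, sizeof target);
        return target;                      /* branch offset in code units */
    }

    int main(void) {
        /* keys 10..12 -> offsets 100, 200, 300 (little-endian halves) */
        uint16_t payload[] = { 0x0100, 3, 10, 0, 100, 0, 200, 0, 300, 0 };
        return packed_switch(payload, 11) == 200 ? 0 : 1;
    }
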
+%def op_return():
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rSELF
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ blne MterpSuspendCheck @ (self)
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA
+ mov r1, #0
+ b MterpReturn
+
+%def op_return_object():
+% op_return()
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rSELF
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ blne MterpSuspendCheck @ (self)
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+%def op_return_void_no_barrier():
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rSELF
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ blne MterpSuspendCheck @ (self)
+ mov r0, #0
+ mov r1, #0
+ b MterpReturn
+
+%def op_return_wide():
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ mov r0, rSELF
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ blne MterpSuspendCheck @ (self)
+ mov r2, rINST, lsr #8 @ r2<- AA
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ b MterpReturn
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r1, r2 @ r1<- vAA (exception object)
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S
deleted file mode 100644
index 7c7c527..0000000
--- a/runtime/interpreter/mterp/arm/entry.S
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
-
-/*
- * On entry:
- * r0 Thread* self/
- * r1 insns_
- * r2 ShadowFrame
- * r3 JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
- stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r3, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r6, 12
- .cfi_rel_offset r7, 16
- .cfi_rel_offset r8, 20
- .cfi_rel_offset r9, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset fp, 32
- .cfi_rel_offset lr, 36
-
- /* Remember the return register */
- str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov rSELF, r0
- ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
- VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
- ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- bl MterpSetUpHotnessCountdown
- mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST @ load rINST from rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm/fallback.S b/runtime/interpreter/mterp/arm/fallback.S
deleted file mode 100644
index 44e7e12..0000000
--- a/runtime/interpreter/mterp/arm/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
- b MterpFallback
-
diff --git a/runtime/interpreter/mterp/arm/fbinop.S b/runtime/interpreter/mterp/arm/fbinop.S
deleted file mode 100644
index 594ee03..0000000
--- a/runtime/interpreter/mterp/arm/fbinop.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $instr @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S
deleted file mode 100644
index 53c87a0..0000000
--- a/runtime/interpreter/mterp/arm/fbinop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- $instr @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
deleted file mode 100644
index ca13bfb..0000000
--- a/runtime/interpreter/mterp/arm/fbinopWide.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $instr @ s2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
deleted file mode 100644
index 9766e2c..0000000
--- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- $instr @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
new file mode 100644
index 0000000..6bf54e8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/floating_point.S
@@ -0,0 +1,484 @@
+%def fbinop(instr=""):
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def fbinop2addr(instr=""):
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ flds s1, [r3] @ s1<- vB
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+ $instr @ s2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def fbinopWide(instr=""):
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+    $instr                              @ d2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def fbinopWide2addr(instr=""):
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+ $instr @ d2<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def funop(instr=""):
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ s1<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def funopNarrower(instr=""):
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ s0<- op
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def funopWider(instr=""):
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ $instr @ d0<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_add_double():
+% fbinopWide(instr="faddd d2, d0, d1")
+
+%def op_add_double_2addr():
+% fbinopWide2addr(instr="faddd d2, d0, d1")
+
+%def op_add_float():
+% fbinop(instr="fadds s2, s0, s1")
+
+%def op_add_float_2addr():
+% fbinop2addr(instr="fadds s2, s0, s1")
+
+%def op_cmpg_double():
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ vcmpe.f64 d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_cmpg_float():
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ vcmpe.f32 s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    mvnmi   r0, #0                      @ (less than) r0<- -1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_cmpl_double():
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ vcmpe.f64 d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_cmpl_float():
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH r0, 1 @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ vcmpe.f32 s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ fmstat @ export status flags
+    movgt   r0, #1                      @ (greater than) r0<- 1
+    moveq   r0, #0                      @ (equal) r0<- 0
+ SET_VREG r0, r9 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
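The four comparison handlers differ only in the value left in r0 when the operands are unordered: the cmpg variants preload +1 and the cmpl variants preload -1 before the flag-conditional moves run, which is exactly the "gt bias" and "lt bias" Dalvik requires for NaN. A C statement of the same semantics (illustrative names):

    #include <math.h>
    #include <stdio.h>

    /* cmpg: unordered (NaN) compares as 1; cmpl: unordered compares as -1. */
    static int cmpg_float(float x, float y) {
        if (x < y)  return -1;
        if (x == y) return 0;
        return 1;               /* x > y, or NaN involved */
    }

    static int cmpl_float(float x, float y) {
        if (x > y)  return 1;
        if (x == y) return 0;
        return -1;              /* x < y, or NaN involved */
    }

    int main(void) {
        printf("%d %d\n", cmpg_float(NAN, 1.0f), cmpl_float(NAN, 1.0f));
        /* prints: 1 -1 */
        return 0;
    }
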
+%def op_div_double():
+% fbinopWide(instr="fdivd d2, d0, d1")
+
+%def op_div_double_2addr():
+% fbinopWide2addr(instr="fdivd d2, d0, d1")
+
+%def op_div_float():
+% fbinop(instr="fdivs s2, s0, s1")
+
+%def op_div_float_2addr():
+% fbinop2addr(instr="fdivs s2, s0, s1")
+
+%def op_double_to_float():
+% funopNarrower(instr="vcvt.f32.f64 s0, d0")
+
+%def op_double_to_int():
+% funopNarrower(instr="ftosizd s0, d0")
+
+%def op_double_to_long():
+% unopWide(instr="bl d2l_doconv")
+% add_helper(op_double_to_long_helper)
+
+%def op_double_to_long_helper():
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+ ubfx r2, r1, #20, #11 @ grab the exponent
+ movw r3, #0x43e
+    cmp     r2, r3                      @ x < MINLONG or x > MAXLONG?
+ bhs d2l_special_cases
+ b __aeabi_d2lz @ tail call to convert double to long
+d2l_special_cases:
+ movw r3, #0x7ff
+ cmp r2, r3
+ beq d2l_maybeNaN @ NaN?
+d2l_notNaN:
+ adds r1, r1, r1 @ sign bit to carry
+ mov r0, #0xffffffff @ assume maxlong for lsw
+ mov r1, #0x7fffffff @ assume maxlong for msw
+ adc r0, r0, #0
+ adc r1, r1, #0 @ convert maxlong to minlong if exp negative
+ bx lr @ return
+d2l_maybeNaN:
+ orrs r3, r0, r1, lsl #12
+ beq d2l_notNaN @ if fraction is non-zero, it's a NaN
+ mov r0, #0
+ mov r1, #0
+ bx lr @ return 0 for NaN
+
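As the comment above notes, the helper has to clamp out-of-range values to the long min/max and map NaN to 0, which a bare __aeabi_d2lz call does not guarantee; only "reasonable" values take the tail call. A C sketch of the intended mapping (illustrative; not the actual helper):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Dalvik double-to-long: truncate in-range values, clamp the rest,
     * and turn NaN into 0. */
    static int64_t d2l(double x) {
        if (isnan(x))
            return 0;
        if (x >= 9223372036854775808.0)     /* x >= 2^63  */
            return INT64_MAX;
        if (x <= -9223372036854775808.0)    /* x <= -2^63 */
            return INT64_MIN;
        return (int64_t)x;
    }

    int main(void) {
        printf("%lld %lld %lld\n", (long long)d2l(1e300),
               (long long)d2l(-1e300), (long long)d2l(NAN));
        return 0;
    }
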
+%def op_float_to_double():
+% funopWider(instr="vcvt.f64.f32 d0, s0")
+
+%def op_float_to_int():
+% funop(instr="ftosizs s1, s0")
+
+%def op_float_to_long():
+% unopWider(instr="bl f2l_doconv")
+% add_helper(op_float_to_long_helper)
+
+%def op_float_to_long_helper():
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+ ubfx r2, r0, #23, #8 @ grab the exponent
+    cmp     r2, #0xbe                   @ x < MINLONG or x > MAXLONG?
+ bhs f2l_special_cases
+ b __aeabi_f2lz @ tail call to convert float to long
+f2l_special_cases:
+ cmp r2, #0xff @ NaN or infinity?
+ beq f2l_maybeNaN
+f2l_notNaN:
+ adds r0, r0, r0 @ sign bit to carry
+ mov r0, #0xffffffff @ assume maxlong for lsw
+ mov r1, #0x7fffffff @ assume maxlong for msw
+ adc r0, r0, #0
+ adc r1, r1, #0 @ convert maxlong to minlong if exp negative
+ bx lr @ return
+f2l_maybeNaN:
+ lsls r3, r0, #9
+ beq f2l_notNaN @ if fraction is non-zero, it's a NaN
+ mov r0, #0
+ mov r1, #0
+ bx lr @ return 0 for NaN
+
+%def op_int_to_double():
+% funopWider(instr="fsitod d0, s0")
+
+%def op_int_to_float():
+% funop(instr="fsitos s1, s0")
+
+%def op_long_to_double():
+ /*
+ * Specialised 64-bit floating point operation.
+ *
+ * Note: The result will be returned in d2.
+ *
+ * For: long-to-double
+ */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
+ vldr d0, [r3] @ d0<- vAA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+
+ vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
+ vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
+ vldr d3, constval$opcode
+ vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
+
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ vstr.64 d2, [r9] @ vAA<- d2
+ GOTO_OPCODE ip @ jump to next instruction
+
+ /* literal pool helper */
+constval${opcode}:
+ .8byte 0x41f0000000000000
+
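The literal-pool constant 0x41f0000000000000 is the double value 2^32: the sequence converts the signed high word and the unsigned low word separately, then recombines them as high * 2^32 + low, since a 64-bit integer no longer fits a single 32-bit vcvt. The same arithmetic in C (illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* long-to-double as the vcvt/vmla sequence computes it:
     * (double)(int32_t)high * 2^32 + (double)(uint32_t)low. */
    static double l2d(int64_t v) {
        int32_t  hi = (int32_t)(v >> 32);
        uint32_t lo = (uint32_t)v;
        return (double)hi * 4294967296.0 + (double)lo;   /* 4294967296.0 == 2^32 */
    }

    int main(void) {
        printf("%.1f %.1f\n", l2d(-1), l2d(1LL << 40));
        /* prints: -1.0 1099511627776.0 */
        return 0;
    }
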
+%def op_long_to_float():
+% unopNarrower(instr="bl __aeabi_l2f")
+
+%def op_mul_double():
+% fbinopWide(instr="fmuld d2, d0, d1")
+
+%def op_mul_double_2addr():
+% fbinopWide2addr(instr="fmuld d2, d0, d1")
+
+%def op_mul_float():
+% fbinop(instr="fmuls s2, s0, s1")
+
+%def op_mul_float_2addr():
+% fbinop2addr(instr="fmuls s2, s0, s1")
+
+%def op_neg_double():
+% unopWide(instr="add r1, r1, #0x80000000")
+
+%def op_neg_float():
+% unop(instr="add r0, r0, #0x80000000")
+
+%def op_rem_double():
+/* EABI doesn't define a double remainder function, but libm does */
+% binopWide(instr="bl fmod")
+
+%def op_rem_double_2addr():
+/* EABI doesn't define a double remainder function, but libm does */
+% binopWide2addr(instr="bl fmod")
+
+%def op_rem_float():
+/* EABI doesn't define a float remainder function, but libm does */
+% binop(instr="bl fmodf")
+
+%def op_rem_float_2addr():
+/* EABI doesn't define a float remainder function, but libm does */
+% binop2addr(instr="bl fmodf")
+
+%def op_sub_double():
+% fbinopWide(instr="fsubd d2, d0, d1")
+
+%def op_sub_double_2addr():
+% fbinopWide2addr(instr="fsubd d2, d0, d1")
+
+%def op_sub_float():
+% fbinop(instr="fsubs s2, s0, s1")
+
+%def op_sub_float_2addr():
+% fbinop2addr(instr="fsubs s2, s0, s1")
diff --git a/runtime/interpreter/mterp/arm/footer.S b/runtime/interpreter/mterp/arm/footer.S
deleted file mode 100644
index 8e9c3c2..0000000
--- a/runtime/interpreter/mterp/arm/footer.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
- cmp r0, #0 @ Exception pending?
- beq MterpFallback @ If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException @ (self, shadow_frame)
- cmp r0, #0
- beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr r1, [rFP, #OFF_FP_DEX_PC]
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp rINST, #0
-MterpCommonTakenBranch:
- bgt .L_forward_branch @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmp rPROFILE, #JIT_CHECK_OSR
- beq .L_osr_check
- subsgt rPROFILE, #1
- beq .L_add_batch @ counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- REFRESH_IBASE
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bne .L_suspend_request_pending
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov r0, rSELF
- bl MterpSuspendCheck @ (self)
- cmp r0, #0
- bne MterpFallback
- REFRESH_IBASE @ might have changed during suspend
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_no_count_backwards:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- bne .L_resume_backward_branch
-.L_osr_check:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_osr_forward
-.L_resume_forward_branch:
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_check_osr_forward:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add r1, rFP, #OFF_FP_SHADOWFRAME
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr r0, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- bl MterpLogOSR
-#endif
- mov r0, #1 @ Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov r0, #0 @ signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov r0, #1 @ signal return to caller.
- b MterpDone
-MterpReturn:
- ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
- str r0, [r2]
- str r1, [r2, #4]
- mov r0, #1 @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp rPROFILE, #0
- bgt MterpProfileActive @ if > 0, we may have some counts to report.
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
-MterpProfileActive:
- mov rINST, r0 @ stash return value
- /* Report cached hotness counts */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov r0, rINST @ restore return value
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
- END ExecuteMterpImpl
-
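Note: the footer removed here is not dropped; it reappears below as the %def footer() section of runtime/interpreter/mterp/arm/main.S. For readers following the move, the rPROFILE hotness-countdown logic it carries behaves roughly like the toy C model below. Every name and constant in the sketch is an illustrative placeholder, not ART's real API; ToyReportBatch merely stands in for the role MterpAddHotnessBatch plays in the assembly.

    #include <assert.h>

    #define TOY_JIT_CHECK_OSR (-1)   /* special "only check for OSR" state */

    struct ToyProfile {
      int countdown;   /* plays the role of rPROFILE                   */
      int batches;     /* how many times cached hotness was reported   */
    };

    /* Stand-in for MterpAddHotnessBatch in this sketch: flush the cached
     * count and hand back a fresh countdown (or a special state). */
    static int ToyReportBatch(struct ToyProfile* p, int fresh_countdown) {
      p->batches++;
      return fresh_countdown;
    }

    /* Taken backward branch, fast path only: the OSR-check state
     * short-circuits, otherwise count down and report a batch when the
     * countdown reaches zero. */
    static void ToyTakenBackwardBranch(struct ToyProfile* p, int fresh_countdown) {
      if (p->countdown == TOY_JIT_CHECK_OSR) {
        return;                                             /* .L_osr_check */
      }
      if (p->countdown > 0 && --p->countdown == 0) {
        p->countdown = ToyReportBatch(p, fresh_countdown);  /* .L_add_batch */
      }
    }

    int main(void) {
      struct ToyProfile p = { 2, 0 };
      ToyTakenBackwardBranch(&p, 5);
      ToyTakenBackwardBranch(&p, 5);   /* second branch hits zero: one batch */
      assert(p.batches == 1 && p.countdown == 5);
      return 0;
    }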
diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S
deleted file mode 100644
index 1b8bb8b..0000000
--- a/runtime/interpreter/mterp/arm/funop.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s1, [r9] @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S
deleted file mode 100644
index b9f758b..0000000
--- a/runtime/interpreter/mterp/arm/funopNarrower.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s0, [r9] @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
deleted file mode 100644
index 854cdc9..0000000
--- a/runtime/interpreter/mterp/arm/funopWider.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
deleted file mode 100644
index 8d9cab5..0000000
--- a/runtime/interpreter/mterp/arm/header.S
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them. If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending". Only the arguments that don't fit in the first 4
-registers are placed on the stack. "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- r4 rPC interpreted program counter, used for fetching instructions
- r5 rFP interpreted frame pointer, used for accessing locals and args
- r6 rSELF self (Thread) pointer
- r7 rINST first 16-bit code unit of current instruction
- r8 rIBASE interpreted instruction base pointer, used for computed goto
- r10 rPROFILE branch profiling countdown
- r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC r4
-#define CFI_DEX 4 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define rFP r5
-#define rSELF r6
-#define rINST r7
-#define rIBASE r8
-#define rPROFILE r10
-#define rREFS r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- sub \tmp, rPC, \tmp
- asr \tmp, #1
- str \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- ldrh rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- add rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg. Updates
- * rPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
- ldrh rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
- add pc, rIBASE, \reg, lsl #${handler_size_bits}
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add pc, \base, \reg, lsl #${handler_size_bits}
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [rFP, \vreg, lsl #2]
- mov \reg, #0
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [rFP, \vreg, lsl #2]
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
- mov \tmp1, #0
- add \tmp2, \vreg, #1
- SET_VREG_SHADOW \tmp1, \vreg
- SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .arm
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
-.endm
-
-.macro END name
- .fnend
- .cfi_endproc
- .size \name, .-\name
-.endm
diff --git a/runtime/interpreter/mterp/arm/instruction_end.S b/runtime/interpreter/mterp/arm/instruction_end.S
deleted file mode 100644
index f90ebd0..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_alt.S b/runtime/interpreter/mterp/arm/instruction_end_alt.S
deleted file mode 100644
index 0b66dbb..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_end_sister.S b/runtime/interpreter/mterp/arm/instruction_end_sister.S
deleted file mode 100644
index 71c0300..0000000
--- a/runtime/interpreter/mterp/arm/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmSisterEnd, #object
- .hidden artMterpAsmSisterEnd
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm/instruction_start.S b/runtime/interpreter/mterp/arm/instruction_start.S
deleted file mode 100644
index b7e9cf5..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_alt.S b/runtime/interpreter/mterp/arm/instruction_start_alt.S
deleted file mode 100644
index 7a67ba0..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
diff --git a/runtime/interpreter/mterp/arm/instruction_start_sister.S b/runtime/interpreter/mterp/arm/instruction_start_sister.S
deleted file mode 100644
index 0036061..0000000
--- a/runtime/interpreter/mterp/arm/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
- .type artMterpAsmSisterStart, #object
- .hidden artMterpAsmSisterStart
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/arm/invoke.S b/runtime/interpreter/mterp/arm/invoke.S
index e47dd1b..8693d3b 100644
--- a/runtime/interpreter/mterp/arm/invoke.S
+++ b/runtime/interpreter/mterp/arm/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -20,3 +20,102 @@
GET_INST_OPCODE ip
GOTO_OPCODE ip
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ mov r3, rINST
+ bl $helper
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+ /*
+ * Handle an invoke-custom invocation.
+ *
+ * for: invoke-custom, invoke-custom/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
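All of the generated wrappers above share one shape: export the PC, call a Mterp invoke helper, bail to exception handling or the reference interpreter on the unhappy paths, and otherwise advance past the instruction and dispatch the next opcode. The C sketch below is illustrative only; its types, helper signature, and function names are placeholders rather than ART's real declarations, and EXPORT_PC is omitted. It hard-codes the 4-code-unit advance used by the invoke-polymorphic wrapper shown above; the non-polymorphic wrappers advance by their own fixed width.

    #include <stdint.h>

    typedef struct ToyThread ToyThread;            /* opaque placeholders */
    typedef struct ToyShadowFrame ToyShadowFrame;
    typedef int (*ToyInvokeHelper)(ToyThread*, ToyShadowFrame*,
                                   const uint16_t* dex_pc, uint16_t inst_data);

    enum ToyOutcome { TOY_NEXT_OPCODE, TOY_PENDING_EXCEPTION, TOY_FALLBACK };

    /* Mirrors the wrapper shape: call the helper, surface a pending
     * exception or fall back to the reference interpreter on failure
     * paths, otherwise advance and keep dispatching. */
    static enum ToyOutcome ToyDispatchInvoke(ToyInvokeHelper helper,
                                             ToyThread* self, ToyShadowFrame* sf,
                                             const uint16_t** pc,
                                             uint16_t inst_data,
                                             int should_switch_interpreters) {
      if (!helper(self, sf, *pc, inst_data)) {   /* "beq MterpException"   */
        return TOY_PENDING_EXCEPTION;
      }
      *pc += 4;                                  /* FETCH_ADVANCE_INST 4   */
      if (should_switch_interpreters) {          /* "bne MterpFallback"    */
        return TOY_FALLBACK;
      }
      return TOY_NEXT_OPCODE;                    /* GET/GOTO_OPCODE        */
    }

    static int ToyHelperAlwaysSucceeds(ToyThread* self, ToyShadowFrame* sf,
                                       const uint16_t* pc, uint16_t inst_data) {
      (void)self; (void)sf; (void)pc; (void)inst_data;
      return 1;   /* pretend the call succeeded with no pending exception */
    }

    int main(void) {
      static const uint16_t code[8] = {0};
      const uint16_t* pc = code;
      return ToyDispatchInvoke(ToyHelperAlwaysSucceeds, 0, 0, &pc, 0, 0)
                 == TOY_NEXT_OPCODE ? 0 : 1;
    }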
diff --git a/runtime/interpreter/mterp/arm/invoke_polymorphic.S b/runtime/interpreter/mterp/arm/invoke_polymorphic.S
deleted file mode 100644
index f569d61..0000000
--- a/runtime/interpreter/mterp/arm/invoke_polymorphic.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl $helper
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
new file mode 100644
index 0000000..f5fdf14
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -0,0 +1,750 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+ r10 rPROFILE branch profiling countdown
+ r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define CFI_DEX 4 // DWARF register number of the register holding dex-pc (xPC).
+#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+#define rPROFILE r10
+#define rREFS r11
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
+.macro EXPORT_DEX_PC tmp
+ ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
+ str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
+ sub \tmp, rPC, \tmp
+ asr \tmp, #1
+ str \tmp, [rFP, #OFF_FP_DEX_PC]
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ ldrh rINST, [rPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh rINST, [rPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh rINST, [rPC, #((\count)*2)]
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ add rPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ ldrh rINST, [rPC, \reg]!
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [rPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [rPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [rPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+.macro GOTO_OPCODE reg
+ add pc, rIBASE, \reg, lsl #${handler_size_bits}
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add pc, \base, \reg, lsl #${handler_size_bits}
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [rFP, \vreg, lsl #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [rFP, \vreg, lsl #2]
+ mov \reg, #0
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_WIDE regLo, regHi, vreg
+ add ip, rFP, \vreg, lsl #2
+ strd \regLo, \regHi, [ip]
+ mov \regLo, #0
+ mov \regHi, #0
+ add ip, rREFS, \vreg, lsl #2
+ strd \regLo, \regHi, [ip]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [rFP, \vreg, lsl #2]
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+.macro SET_VREG_SHADOW reg, vreg
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+ mov \tmp1, #0
+ add \tmp2, \vreg, #1
+ SET_VREG_SHADOW \tmp1, \vreg
+ SET_VREG_SHADOW \tmp1, \tmp2
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+ .arm
+ .type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+.endm
+
+.macro END name
+ .size \name, .-\name
+.endm
+
+// Macro to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ rsb \rRef, \rRef, #0
+#endif // USE_HEAP_POISONING
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+
+/*
+ * On entry:
+ * r0 Thread* self
+ * r1 insns_
+ * r2 ShadowFrame
+ * r3 JValue* result_register
+ *
+ */
+
+ENTRY ExecuteMterpImpl
+ .cfi_startproc
+ stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
+ .cfi_adjust_cfa_offset 40
+ .cfi_rel_offset r3, 0
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+ .cfi_rel_offset r6, 12
+ .cfi_rel_offset r7, 16
+ .cfi_rel_offset r8, 20
+ .cfi_rel_offset r9, 24
+ .cfi_rel_offset r10, 28
+ .cfi_rel_offset fp, 32
+ .cfi_rel_offset lr, 36
+
+ /* Remember the return register */
+ str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the dex instruction pointer */
+ str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
+
+ /* set up "named" registers */
+ mov rSELF, r0
+ ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+ add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
+ VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
+ ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
+ add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* Set up for backwards branches & osr profiling */
+ ldr r0, [rFP, #OFF_FP_METHOD]
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rSELF
+ bl MterpSetUpHotnessCountdown
+ mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST @ load rINST from rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+ /* NOTE: no fallthrough */
+ // cfi info continues, and covers the whole mterp implementation.
+ END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+ // Call C++ to do debug checks and return to the handler using tail call.
+ .extern MterpCheckBefore
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rPC
+ b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ bl Mterp_dchecks_before_helper
+ #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+%def helpers():
+ ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ cmp r0, #0 @ Exception pending?
+ beq MterpFallback @ If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException @ (self, shadow_frame)
+ cmp r0, #0
+ beq MterpExceptionReturn @ no local catch, back to caller.
+ ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
+ ldr r1, [rFP, #OFF_FP_DEX_PC]
+ ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ bl MterpShouldSwitchInterpreters
+ cmp r0, #0
+ bne MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * rINST <= signed offset
+ * rPROFILE <= signed hotness countdown (expanded to 32 bits)
+ * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+ cmp rINST, #0
+MterpCommonTakenBranch:
+ bgt .L_forward_branch @ don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+# error "JIT_CHECK_OSR must be -1."
+#endif
+ cmp rPROFILE, #JIT_CHECK_OSR
+ beq .L_osr_check
+ subsgt rPROFILE, #1
+ beq .L_add_batch @ counted down to zero - report
+.L_resume_backward_branch:
+ ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
+ REFRESH_IBASE
+ add r2, rINST, rINST @ r2<- byte offset
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ bne .L_suspend_request_pending
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ mov r0, rSELF
+ bl MterpSuspendCheck @ (self)
+ cmp r0, #0
+ bne MterpFallback
+ REFRESH_IBASE @ might have changed during suspend
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.L_no_count_backwards:
+ cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
+ bne .L_resume_backward_branch
+.L_osr_check:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
+ cmp r0, #0
+ bne MterpOnStackReplacement
+ b .L_resume_backward_branch
+
+.L_forward_branch:
+ cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
+ beq .L_check_osr_forward
+.L_resume_forward_branch:
+ add r2, rINST, rINST @ r2<- byte offset
+ FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+.L_check_osr_forward:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
+ cmp r0, #0
+ bne MterpOnStackReplacement
+ b .L_resume_forward_branch
+
+.L_add_batch:
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ ldr r0, [rFP, #OFF_FP_METHOD]
+ mov r2, rSELF
+ bl MterpAddHotnessBatch @ (method, shadow_frame, self)
+ mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
+ b .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, #2
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
+ cmp r0, #0
+ bne MterpOnStackReplacement
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rINST
+ bl MterpLogOSR
+#endif
+ mov r0, #1 @ Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov r0, rSELF
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov r0, #0 @ signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov r0, #1 @ signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
+ str r0, [r2]
+ str r1, [r2, #4]
+ mov r0, #1 @ signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ cmp rPROFILE, #0
+ bgt MterpProfileActive @ if > 0, we may have some counts to report.
+ ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
+
+MterpProfileActive:
+ mov rINST, r0 @ stash return value
+ /* Report cached hotness counts */
+ ldr r0, [rFP, #OFF_FP_METHOD]
+ add r1, rFP, #OFF_FP_SHADOWFRAME
+ mov r2, rSELF
+ strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ bl MterpAddHotnessBatch @ (method, shadow_frame, self)
+ mov r0, rINST @ restore return value
+ ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
+
+ .cfi_endproc
+ END MterpHelpers
+
+%def instruction_end():
+
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_start():
+
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
+ .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+%def opcode_start():
+ ENTRY Mterp_${opcode}
+%def opcode_end():
+ END Mterp_${opcode}
+%def helper_start(name):
+ ENTRY ${name}
+%def helper_end(name):
+ END ${name}
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
new file mode 100644
index 0000000..092aa9e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -0,0 +1,322 @@
+%def field(helper=""):
+ /*
+ * General field read / write (iget-* iput-* sget-* sput-*).
+ */
+ .extern $helper
+ mov r0, rPC @ arg0: Instruction* inst
+ mov r1, rINST @ arg1: uint16_t inst_data
+ add r2, rFP, #OFF_FP_SHADOWFRAME @ arg2: ShadowFrame* sf
+ mov r3, rSELF @ arg3: Thread* self
+ PREFETCH_INST 2 @ prefetch next opcode
+ bl $helper
+ cmp r0, #0
+ beq MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_check_cast():
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpCheckCast @ (index, &obj, method, self)
+ PREFETCH_INST 2
+ cmp r0, #0
+ bne MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
+ @ Fast-path which gets the field offset from thread-local cache.
+ add r0, rSELF, #THREAD_INTERPRETER_CACHE_OFFSET @ cache address
+ ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
+ add r0, r0, r1, lsl #3 @ entry address within the cache
+ ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
+ mov r2, rINST, lsr #12 @ B
+ GET_VREG r2, r2 @ object we're operating on
+ cmp r0, rPC
+% slow_path_label = add_helper(lambda: field(helper))
+ bne ${slow_path_label} @ cache miss
+ cmp r2, #0
+ beq common_errNullObject @ null object
+% if is_wide:
+ ldrd r0, r1, [r2, r1] @ r0,r1 <- obj.field
+% else:
+ ${load} r0, [r2, r1] @ r0 <- obj.field
+% #endif
+% if is_object:
+ UNPOISON_HEAP_REF r0
+#if defined(USE_READ_BARRIER)
+# if defined(USE_BAKER_READ_BARRIER)
+ ldr ip, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cmp ip, #0
+ bne .L_${opcode}_mark @ GC is active
+.L_${opcode}_marked:
+# else
+ bl artReadBarrierMark @ r0 <- artReadBarrierMark(r0)
+# endif
+#endif
+% #endif
+ ubfx r2, rINST, #8, #4 @ A
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+% if is_object:
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+% elif is_wide:
+ SET_VREG_WIDE r0, r1, r2 @ fp[A]<- r0, r1
+% else:
+ SET_VREG r0, r2 @ fp[A]<- r0
+% #endif
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+% if is_object:
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+.L_${opcode}_mark:
+ bl artReadBarrierMark @ r0 <- artReadBarrierMark(r0)
+ b .L_${opcode}_marked
+#endif
+% #endif
+
+%def op_iget_boolean():
+% op_iget(load="ldrb", helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="ldrb")
+
+%def op_iget_byte():
+% op_iget(load="ldrsb", helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="ldrsb")
+
+%def op_iget_char():
+% op_iget(load="ldrh", helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="ldrh")
+
+%def op_iget_object():
+% op_iget(is_object=True, helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ EXPORT_PC
+ GET_VREG r0, r2 @ r0<- object we're operating on
+ bl artIGetObjectFromMterp @ (obj, offset)
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ PREFETCH_INST 2
+ cmp r3, #0
+ bne MterpPossibleException @ bail out
+ SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iget_quick(load="ldr"):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ $load r0, [r3, r1] @ r0<- obj.field
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r2 @ fp[A]<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iget_short():
+% op_iget(load="ldrsh", helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="ldrsh")
+
+%def op_iget_wide():
+% op_iget(is_wide=True, helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH ip, 1 @ ip<- field byte offset
+ GET_VREG r3, r2 @ r3<- object we're operating on
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
+ CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_instance_of():
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- CCCC
+ mov r1, rINST, lsr #12 @ r1<- B
+ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
+ ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
+ mov r3, rSELF @ r3<- self
+ bl MterpInstanceOf @ (index, &obj, method, self)
+ ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx r9, rINST, #8, #4 @ r9<- A
+ PREFETCH_INST 2
+ cmp r1, #0 @ exception pending?
+ bne MterpException
+ ADVANCE 2 @ advance rPC
+ SET_VREG r0, r9 @ vA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iput(helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(store="strb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(store="strb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(store="strh")
+
+%def op_iput_object():
+% op_iput(helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rPC
+ mov r2, rINST
+ bl MterpIputObjectQuick
+ cmp r0, #0
+ beq MterpException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iput_quick(store="str"):
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r1, 1 @ r1<- field byte offset
+ GET_VREG r3, r2 @ r3<- fp[B], the object pointer
+ ubfx r2, rINST, #8, #4 @ r2<- A
+ cmp r3, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ GET_VREG r0, r2 @ r0<- fp[A]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ $store r0, [r3, r1] @ obj.field<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(store="strh")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ FETCH r3, 1 @ r3<- field byte offset
+ GET_VREG r2, r2 @ r2<- fp[B], the object pointer
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ cmp r2, #0 @ check object for null
+ beq common_errNullObject @ object was null
+ VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
+ ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_new_instance():
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ add r0, rFP, #OFF_FP_SHADOWFRAME
+ mov r1, rSELF
+ mov r2, rINST
+ bl MterpNewInstance @ (shadow_frame, self, inst_data)
+ cmp r0, #0
+ beq MterpPossibleException
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_sget(helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
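The op_iget fast path above probes a small per-thread cache keyed by the dex PC: bits [2, 2+SIZE_LOG2) of rPC select an 8-byte (key, value) slot, a hit yields the resolved field byte offset, and a miss falls through to the field() slow path. The standalone C model below mirrors that lookup; the cache size, struct, and function names are illustrative placeholders, not ART's actual data structures.

    #include <assert.h>
    #include <stdint.h>

    #define TOY_CACHE_SIZE_LOG2 8
    #define TOY_CACHE_SIZE (1u << TOY_CACHE_SIZE_LOG2)

    struct ToyCacheEntry {
      const uint16_t* key;   /* dex PC of the instruction that was resolved */
      uintptr_t value;       /* e.g. the resolved field byte offset         */
    };

    static struct ToyCacheEntry toy_cache[TOY_CACHE_SIZE];

    /* Same indexing as "ubfx r1, rPC, #2, #SIZE_LOG2": drop the two low
     * bits of the PC, keep the next SIZE_LOG2 bits as the slot index. */
    static unsigned ToySlot(const uint16_t* pc) {
      return (unsigned)(((uintptr_t)pc >> 2) & (TOY_CACHE_SIZE - 1u));
    }

    /* Returns 1 and fills *offset on a hit; 0 means take the slow path,
     * which resolves the field and would refill the slot for next time. */
    static int ToyLookup(const uint16_t* pc, uintptr_t* offset) {
      const struct ToyCacheEntry* e = &toy_cache[ToySlot(pc)];
      if (e->key != pc) {
        return 0;                      /* "cmp r0, rPC; bne <slow path>" */
      }
      *offset = e->value;
      return 1;
    }

    int main(void) {
      static const uint16_t code[4] = {0};
      uintptr_t offset = 0;
      assert(!ToyLookup(&code[1], &offset));              /* cold cache: miss */
      toy_cache[ToySlot(&code[1])].key = &code[1];
      toy_cache[ToySlot(&code[1])].value = 12;
      assert(ToyLookup(&code[1], &offset) && offset == 12);
      return 0;
    }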
diff --git a/runtime/interpreter/mterp/arm/op_add_double.S b/runtime/interpreter/mterp/arm/op_add_double.S
deleted file mode 100644
index 9332bf2..0000000
--- a/runtime/interpreter/mterp/arm/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_double_2addr.S b/runtime/interpreter/mterp/arm/op_add_double_2addr.S
deleted file mode 100644
index 3242c53..0000000
--- a/runtime/interpreter/mterp/arm/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"faddd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float.S b/runtime/interpreter/mterp/arm/op_add_float.S
deleted file mode 100644
index afb7967..0000000
--- a/runtime/interpreter/mterp/arm/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_float_2addr.S b/runtime/interpreter/mterp/arm/op_add_float_2addr.S
deleted file mode 100644
index 0067b6a..0000000
--- a/runtime/interpreter/mterp/arm/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fadds s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int.S b/runtime/interpreter/mterp/arm/op_add_int.S
deleted file mode 100644
index 1dcae7e..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_2addr.S b/runtime/interpreter/mterp/arm/op_add_int_2addr.S
deleted file mode 100644
index 9ea98f1..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit16.S b/runtime/interpreter/mterp/arm/op_add_int_lit16.S
deleted file mode 100644
index 5763ab8..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"add r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_add_int_lit8.S b/runtime/interpreter/mterp/arm/op_add_int_lit8.S
deleted file mode 100644
index 035510d..0000000
--- a/runtime/interpreter/mterp/arm/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"add r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long.S b/runtime/interpreter/mterp/arm/op_add_long.S
deleted file mode 100644
index 093223e..0000000
--- a/runtime/interpreter/mterp/arm/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_add_long_2addr.S b/runtime/interpreter/mterp/arm/op_add_long_2addr.S
deleted file mode 100644
index c11e0af..0000000
--- a/runtime/interpreter/mterp/arm/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aget.S b/runtime/interpreter/mterp/arm/op_aget.S
deleted file mode 100644
index 11f7079..0000000
--- a/runtime/interpreter/mterp/arm/op_aget.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
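
The op_aget handler above encodes the Dalvik array-get contract: a null array object branches to common_errNullObject, and the index is compared unsigned against the array length so negative indices fail the same bounds check (the cmp/bcs pair). A small C++ sketch of just those checks; ArrayGet is an illustrative name and std::vector stands in for the array object:

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // The two checks the handler performs before the load: null check first, then
    // one unsigned comparison of index against length, which also rejects negative
    // indices because they wrap to large unsigned values.
    std::int32_t ArrayGet(const std::vector<std::int32_t>* array, std::int32_t index) {
      if (array == nullptr) {
        throw std::runtime_error("NullPointerException");           // common_errNullObject
      }
      if (static_cast<std::uint32_t>(index) >= array->size()) {     // cmp + bcs
        throw std::out_of_range("ArrayIndexOutOfBoundsException");  // common_errArrayIndex
      }
      return (*array)[index];
    }

    int main() {
      std::vector<std::int32_t> a{10, 20, 30};
      return ArrayGet(&a, 2) == 30 ? 0 : 1;
    }
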
diff --git a/runtime/interpreter/mterp/arm/op_aget_boolean.S b/runtime/interpreter/mterp/arm/op_aget_boolean.S
deleted file mode 100644
index 8f678dc..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_byte.S b/runtime/interpreter/mterp/arm/op_aget_byte.S
deleted file mode 100644
index a304650..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_char.S b/runtime/interpreter/mterp/arm/op_aget_char.S
deleted file mode 100644
index 4908306..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_object.S b/runtime/interpreter/mterp/arm/op_aget_object.S
deleted file mode 100644
index 4e0aab5..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_object.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- bl artAGetObjectFromMterp @ (array, index)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- PREFETCH_INST 2
- cmp r1, #0
- bne MterpException
- SET_VREG_OBJECT r0, r9
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aget_short.S b/runtime/interpreter/mterp/arm/op_aget_short.S
deleted file mode 100644
index b71e659..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
deleted file mode 100644
index 66ec950..0000000
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ /dev/null
@@ -1,25 +0,0 @@
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_and_int.S b/runtime/interpreter/mterp/arm/op_and_int.S
deleted file mode 100644
index 7c16d37..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_2addr.S b/runtime/interpreter/mterp/arm/op_and_int_2addr.S
deleted file mode 100644
index 0fbab02..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit16.S b/runtime/interpreter/mterp/arm/op_and_int_lit16.S
deleted file mode 100644
index 541e9b7..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"and r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_and_int_lit8.S b/runtime/interpreter/mterp/arm/op_and_int_lit8.S
deleted file mode 100644
index af746b5..0000000
--- a/runtime/interpreter/mterp/arm/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"and r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long.S b/runtime/interpreter/mterp/arm/op_and_long.S
deleted file mode 100644
index 4ad5158..0000000
--- a/runtime/interpreter/mterp/arm/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_and_long_2addr.S b/runtime/interpreter/mterp/arm/op_and_long_2addr.S
deleted file mode 100644
index e23ea44..0000000
--- a/runtime/interpreter/mterp/arm/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_aput.S b/runtime/interpreter/mterp/arm/op_aput.S
deleted file mode 100644
index a511fa5..0000000
--- a/runtime/interpreter/mterp/arm/op_aput.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- $store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_boolean.S b/runtime/interpreter/mterp/arm/op_aput_boolean.S
deleted file mode 100644
index e86663f..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_byte.S b/runtime/interpreter/mterp/arm/op_aput_byte.S
deleted file mode 100644
index 83694b7..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_char.S b/runtime/interpreter/mterp/arm/op_aput_char.S
deleted file mode 100644
index 3551cac..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_object.S b/runtime/interpreter/mterp/arm/op_aput_object.S
deleted file mode 100644
index c539916..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpAputObject
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_aput_short.S b/runtime/interpreter/mterp/arm/op_aput_short.S
deleted file mode 100644
index 0a0590e..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S
deleted file mode 100644
index 0057507..0000000
--- a/runtime/interpreter/mterp/arm/op_aput_wide.S
+++ /dev/null
@@ -1,24 +0,0 @@
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
- GET_INST_OPCODE ip @ extract opcode from rINST
- strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_array_length.S b/runtime/interpreter/mterp/arm/op_array_length.S
deleted file mode 100644
index 43b1682..0000000
--- a/runtime/interpreter/mterp/arm/op_array_length.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Return the length of an array.
- */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r0, r1 @ r0<- vB (object ref)
- cmp r0, #0 @ is object null?
- beq common_errNullObject @ yup, fail
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r3, r2 @ vB<- length
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_check_cast.S b/runtime/interpreter/mterp/arm/op_check_cast.S
deleted file mode 100644
index 24eba45..0000000
--- a/runtime/interpreter/mterp/arm/op_check_cast.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpCheckCast @ (index, &obj, method, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S
deleted file mode 100644
index 6626ff0..0000000
--- a/runtime/interpreter/mterp/arm/op_cmp_long.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- cmp r0, r2
- sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
- mov ip, #0
- mvnlt ip, #0 @ -1
- cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
- orrne ip, #1
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG ip, r9 @ vAA<- ip
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
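
The sbcs/mvnlt/cmpeq/orrne sequence in op_cmp_long above computes a signed 64-bit three-way comparison without materializing a full 64-bit subtraction result. The value it leaves in ip is the usual -1/0/1, as in this small C++ sketch (CmpLong is an illustrative name):

    #include <cstdint>
    #include <cstdio>

    // Signed 64-bit three-way comparison: -1 via mvnlt, 1 via the cmpeq/orrne
    // pair, 0 from the preloaded default.
    std::int32_t CmpLong(std::int64_t x, std::int64_t y) {
      if (x < y) return -1;
      if (x == y) return 0;
      return 1;
    }

    int main() {
      std::printf("%d %d %d\n", CmpLong(-5, 3), CmpLong(7, 7), CmpLong(9, 2));  // -1 0 1
      return 0;
    }
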
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_double.S b/runtime/interpreter/mterp/arm/op_cmpg_double.S
deleted file mode 100644
index 602a4b1..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpg_double.S
+++ /dev/null
@@ -1,34 +0,0 @@
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpg_float.S b/runtime/interpreter/mterp/arm/op_cmpg_float.S
deleted file mode 100644
index 965091f..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpg_float.S
+++ /dev/null
@@ -1,34 +0,0 @@
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r1<- -1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_double.S b/runtime/interpreter/mterp/arm/op_cmpl_double.S
deleted file mode 100644
index 8a5e509..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpl_double.S
+++ /dev/null
@@ -1,34 +0,0 @@
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_cmpl_float.S b/runtime/interpreter/mterp/arm/op_cmpl_float.S
deleted file mode 100644
index 9df0c2c..0000000
--- a/runtime/interpreter/mterp/arm/op_cmpl_float.S
+++ /dev/null
@@ -1,34 +0,0 @@
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r1<- 1
- moveq r0, #0 @ (equal) r1<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
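
The four floating-point compare handlers above share one body and differ only in the default loaded before fmstat: cmpg-float/cmpg-double start from 1 (mov r0, #1) while cmpl-float/cmpl-double start from -1 (mvn r0, #0), which is exactly how the two opcode families disagree when either operand is NaN. A hedged C++ sketch of that rule, with the NaN bias passed as a parameter (CmpFloating is an illustrative name):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Shared compare body; nan_result is +1 for the cmpg opcodes and -1 for the
    // cmpl opcodes, matching the default each handler loads before fmstat.
    std::int32_t CmpFloating(double x, double y, std::int32_t nan_result) {
      if (std::isnan(x) || std::isnan(y)) return nan_result;
      if (x == y) return 0;
      return x < y ? -1 : 1;
    }

    int main() {
      const double nan = std::nan("");
      std::printf("cmpg: %d  cmpl: %d\n",
                  CmpFloating(nan, 1.0, +1),   // cmpg-* biases NaN toward 1
                  CmpFloating(nan, 1.0, -1));  // cmpl-* biases NaN toward -1
      return 0;
    }
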
diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S
deleted file mode 100644
index 39890a0..0000000
--- a/runtime/interpreter/mterp/arm/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const vAA, #+BBBBbbbb */
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r3 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S
deleted file mode 100644
index a30cf3a..0000000
--- a/runtime/interpreter/mterp/arm/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S
deleted file mode 100644
index c97b0e9..0000000
--- a/runtime/interpreter/mterp/arm/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- SET_VREG r1, r0 @ fp[A]<- r1
- GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_class.S b/runtime/interpreter/mterp/arm/op_const_class.S
deleted file mode 100644
index ff5c98c..0000000
--- a/runtime/interpreter/mterp/arm/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S
deleted file mode 100644
index 536276d..0000000
--- a/runtime/interpreter/mterp/arm/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, r0, lsl #16 @ r0<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_method_handle.S b/runtime/interpreter/mterp/arm/op_const_method_handle.S
deleted file mode 100644
index 71f0550..0000000
--- a/runtime/interpreter/mterp/arm/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm/op_const_method_type.S b/runtime/interpreter/mterp/arm/op_const_method_type.S
deleted file mode 100644
index 2cccdaf..0000000
--- a/runtime/interpreter/mterp/arm/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm/op_const_string.S b/runtime/interpreter/mterp/arm/op_const_string.S
deleted file mode 100644
index 75ec34f..0000000
--- a/runtime/interpreter/mterp/arm/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
deleted file mode 100644
index 1255c07..0000000
--- a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r2, 2 @ r2<- BBBB (high)
- mov r1, rINST, lsr #8 @ r1<- AA
- orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 @ advance rPC
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 3 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
deleted file mode 100644
index 8310a4c..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (low middle)
- FETCH r2, 3 @ r2<- hhhh (high middle)
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
- FETCH r3, 4 @ r3<- HHHH (high)
- mov r9, rINST, lsr #8 @ r9<- AA
- orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
- FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
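
op_const_wide above assembles the 64-bit literal from four consecutive 16-bit code units with two orr ... lsl #16 steps before storing the register pair. The same packing written out in C++; PackConstWide is an illustrative name and the parameters follow the bbbb/BBBB/hhhh/HHHH notation used in the comments:

    #include <cstdint>
    #include <cstdio>

    // Pack the four code units of const-wide the way the handler's orr/lsl pairs
    // do: low word = (BBBB << 16) | bbbb, high word = (HHHH << 16) | hhhh.
    std::uint64_t PackConstWide(std::uint16_t bbbb, std::uint16_t BBBB,
                                std::uint16_t hhhh, std::uint16_t HHHH) {
      std::uint32_t lo = static_cast<std::uint32_t>(bbbb) | (static_cast<std::uint32_t>(BBBB) << 16);
      std::uint32_t hi = static_cast<std::uint32_t>(hhhh) | (static_cast<std::uint32_t>(HHHH) << 16);
      return (static_cast<std::uint64_t>(hi) << 32) | lo;
    }

    int main() {
      // #+HHHHhhhhBBBBbbbb = 0x0123456789abcdef
      std::printf("%#llx\n",
                  static_cast<unsigned long long>(
                      PackConstWide(0xcdef, 0x89ab, 0x4567, 0x0123)));
      return 0;
    }
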
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
deleted file mode 100644
index 28abb51..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r1, r0, asr #31 @ r1<- ssssssss
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
deleted file mode 100644
index c10bb04..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_32.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH r0, 1 @ r0<- 0000bbbb (low)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_S r2, 2 @ r2<- ssssBBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- mov r1, r0, asr #31 @ r1<- ssssssss
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
deleted file mode 100644
index d7e38ec..0000000
--- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, #0 @ r0<- 00000000
- mov r1, r1, lsl #16 @ r1<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_div_double.S b/runtime/interpreter/mterp/arm/op_div_double.S
deleted file mode 100644
index 5147550..0000000
--- a/runtime/interpreter/mterp/arm/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_double_2addr.S b/runtime/interpreter/mterp/arm/op_div_double_2addr.S
deleted file mode 100644
index b812f17..0000000
--- a/runtime/interpreter/mterp/arm/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float.S b/runtime/interpreter/mterp/arm/op_div_float.S
deleted file mode 100644
index 0f24d11..0000000
--- a/runtime/interpreter/mterp/arm/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_float_2addr.S b/runtime/interpreter/mterp/arm/op_div_float_2addr.S
deleted file mode 100644
index a1dbf01..0000000
--- a/runtime/interpreter/mterp/arm/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_int.S b/runtime/interpreter/mterp/arm/op_div_int.S
deleted file mode 100644
index 251064b..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
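
op_div_int above rejects a zero divisor before dividing, then uses the hardware sdiv when __ARM_ARCH_EXT_IDIV__ is defined and otherwise calls the EABI helper __aeabi_idiv. A C++ sketch of the resulting semantics; DivInt is an illustrative name, and the explicit INT32_MIN / -1 guard only keeps the sketch well-defined in C++, since the ARM sdiv instruction already truncates that quotient back to INT32_MIN:

    #include <cstdint>
    #include <stdexcept>

    // div-int as the handler enforces it: a zero divisor traps before any divide.
    std::int32_t DivInt(std::int32_t dividend, std::int32_t divisor) {
      if (divisor == 0) {
        throw std::runtime_error("ArithmeticException");  // common_errDivideByZero
      }
      if (dividend == INT32_MIN && divisor == -1) {
        return INT32_MIN;  // matches the truncated sdiv result; avoids UB in C++
      }
      return dividend / divisor;
    }

    int main() {
      return DivInt(7, -2) == -3 ? 0 : 1;  // truncates toward zero, like the bytecode
    }
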
diff --git a/runtime/interpreter/mterp/arm/op_div_int_2addr.S b/runtime/interpreter/mterp/arm/op_div_int_2addr.S
deleted file mode 100644
index 9be4cd8..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit16.S b/runtime/interpreter/mterp/arm/op_div_int_lit16.S
deleted file mode 100644
index d9bc7d6..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_lit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_int_lit8.S b/runtime/interpreter/mterp/arm/op_div_int_lit8.S
deleted file mode 100644
index 5d2dbd3..0000000
--- a/runtime/interpreter/mterp/arm/op_div_int_lit8.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_div_long.S b/runtime/interpreter/mterp/arm/op_div_long.S
deleted file mode 100644
index 0f21a84..0000000
--- a/runtime/interpreter/mterp/arm/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_div_long_2addr.S b/runtime/interpreter/mterp/arm/op_div_long_2addr.S
deleted file mode 100644
index e172b29..0000000
--- a/runtime/interpreter/mterp/arm/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_float.S b/runtime/interpreter/mterp/arm/op_double_to_float.S
deleted file mode 100644
index 98fdfbc..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopNarrower.S" {"instr":"vcvt.f32.f64 s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_int.S b/runtime/interpreter/mterp/arm/op_double_to_int.S
deleted file mode 100644
index aa035de..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopNarrower.S" {"instr":"ftosizd s0, d0"}
diff --git a/runtime/interpreter/mterp/arm/op_double_to_long.S b/runtime/interpreter/mterp/arm/op_double_to_long.S
deleted file mode 100644
index 19ff723..0000000
--- a/runtime/interpreter/mterp/arm/op_double_to_long.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%include "arm/unopWide.S" {"instr":"bl d2l_doconv"}
-
-%break
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
- movw r3, #0x43e
- cmp r2, r3 @ MINLONG < x > MAXLONG?
- bhs d2l_special_cases
- b __aeabi_d2lz @ tail call to convert double to long
-d2l_special_cases:
- movw r3, #0x7ff
- cmp r2, r3
- beq d2l_maybeNaN @ NaN?
-d2l_notNaN:
- adds r1, r1, r1 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-d2l_maybeNaN:
- orrs r3, r0, r1, lsl #12
- beq d2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
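
The d2l_doconv helper above exists because __aeabi_d2lz alone does not give the conversion rule its comment spells out: NaN converts to 0 and out-of-range values clamp to the long min/max. A C++ sketch of that rule; DoubleToLong is an illustrative name, not a drop-in for the assembly:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Dalvik/Java double-to-long narrowing, the contract d2l_doconv enforces:
    // NaN -> 0, values beyond the long range clamp to the nearest bound, and
    // everything else truncates toward zero.
    std::int64_t DoubleToLong(double x) {
      if (std::isnan(x)) return 0;
      if (x >= 9223372036854775808.0) return INT64_MAX;   // >= 2^63 clamps high
      if (x <= -9223372036854775808.0) return INT64_MIN;  // <= -2^63 clamps low
      return static_cast<std::int64_t>(x);                // truncate toward zero
    }

    int main() {
      std::printf("%lld %lld %lld\n",
                  static_cast<long long>(DoubleToLong(1e300)),         // clamps to max
                  static_cast<long long>(DoubleToLong(-1e300)),        // clamps to min
                  static_cast<long long>(DoubleToLong(std::nan(""))));  // 0
      return 0;
    }

The f2l_doconv helper further down applies the same rule to the float-to-long case, with the exponent test adjusted for single precision.
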
diff --git a/runtime/interpreter/mterp/arm/op_fill_array_data.S b/runtime/interpreter/mterp/arm/op_fill_array_data.S
deleted file mode 100644
index e1ca85c..0000000
--- a/runtime/interpreter/mterp/arm/op_fill_array_data.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
- GET_VREG r0, r3 @ r0<- vAA (array object)
- add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
- bl MterpFillArrayData @ (obj, payload)
- cmp r0, #0 @ 0 means an exception is thrown
- beq MterpPossibleException @ exception?
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array.S b/runtime/interpreter/mterp/arm/op_filled_new_array.S
deleted file mode 100644
index 1075f0c..0000000
--- a/runtime/interpreter/mterp/arm/op_filled_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl $helper
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
deleted file mode 100644
index 16567af..0000000
--- a/runtime/interpreter/mterp/arm/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm/op_float_to_double.S b/runtime/interpreter/mterp/arm/op_float_to_double.S
deleted file mode 100644
index b1e12bd..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopWider.S" {"instr":"vcvt.f64.f32 d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_int.S b/runtime/interpreter/mterp/arm/op_float_to_int.S
deleted file mode 100644
index aab8716..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funop.S" {"instr":"ftosizs s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_float_to_long.S b/runtime/interpreter/mterp/arm/op_float_to_long.S
deleted file mode 100644
index 42bd98d..0000000
--- a/runtime/interpreter/mterp/arm/op_float_to_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
-%include "arm/unopWider.S" {"instr":"bl f2l_doconv"}
-
-%break
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
- cmp r2, #0xbe @ MININT < x > MAXINT?
- bhs f2l_special_cases
- b __aeabi_f2lz @ tail call to convert float to long
-f2l_special_cases:
- cmp r2, #0xff @ NaN or infinity?
- beq f2l_maybeNaN
-f2l_notNaN:
- adds r0, r0, r0 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if exp negative
- bx lr @ return
-f2l_maybeNaN:
- lsls r3, r0, #9
- beq f2l_notNaN @ if fraction is non-zero, it's a NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
diff --git a/runtime/interpreter/mterp/arm/op_goto.S b/runtime/interpreter/mterp/arm/op_goto.S
deleted file mode 100644
index aa42dfd..0000000
--- a/runtime/interpreter/mterp/arm/op_goto.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm/op_goto_16.S b/runtime/interpreter/mterp/arm/op_goto_16.S
deleted file mode 100644
index 12a6bc0..0000000
--- a/runtime/interpreter/mterp/arm/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm/op_goto_32.S b/runtime/interpreter/mterp/arm/op_goto_32.S
deleted file mode 100644
index 7325a1c..0000000
--- a/runtime/interpreter/mterp/arm/op_goto_32.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH r0, 1 @ r0<- aaaa (lo)
- FETCH r3, 2 @ r1<- AAAA (hi)
- orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa
- b MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/arm/op_if_eq.S b/runtime/interpreter/mterp/arm/op_if_eq.S
deleted file mode 100644
index b8b6a6e..0000000
--- a/runtime/interpreter/mterp/arm/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_eqz.S b/runtime/interpreter/mterp/arm/op_if_eqz.S
deleted file mode 100644
index 7012f61..0000000
--- a/runtime/interpreter/mterp/arm/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ge.S b/runtime/interpreter/mterp/arm/op_if_ge.S
deleted file mode 100644
index eb29e63..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gez.S b/runtime/interpreter/mterp/arm/op_if_gez.S
deleted file mode 100644
index d9da374..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gt.S b/runtime/interpreter/mterp/arm/op_if_gt.S
deleted file mode 100644
index a35eab8..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_gtz.S b/runtime/interpreter/mterp/arm/op_if_gtz.S
deleted file mode 100644
index 4ef4d8e..0000000
--- a/runtime/interpreter/mterp/arm/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_le.S b/runtime/interpreter/mterp/arm/op_if_le.S
deleted file mode 100644
index c7c31bc..0000000
--- a/runtime/interpreter/mterp/arm/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lez.S b/runtime/interpreter/mterp/arm/op_if_lez.S
deleted file mode 100644
index 9fbf6c9..0000000
--- a/runtime/interpreter/mterp/arm/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm/op_if_lt.S b/runtime/interpreter/mterp/arm/op_if_lt.S
deleted file mode 100644
index 9469fbb..0000000
--- a/runtime/interpreter/mterp/arm/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ltz.S b/runtime/interpreter/mterp/arm/op_if_ltz.S
deleted file mode 100644
index a4fc1b8..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm/op_if_ne.S b/runtime/interpreter/mterp/arm/op_if_ne.S
deleted file mode 100644
index c945331..0000000
--- a/runtime/interpreter/mterp/arm/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_if_nez.S b/runtime/interpreter/mterp/arm/op_if_nez.S
deleted file mode 100644
index 2d81fda..0000000
--- a/runtime/interpreter/mterp/arm/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm/op_iget.S b/runtime/interpreter/mterp/arm/op_iget.S
deleted file mode 100644
index 1684a76..0000000
--- a/runtime/interpreter/mterp/arm/op_iget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl $helper
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if $is_object
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean.S b/runtime/interpreter/mterp/arm/op_iget_boolean.S
deleted file mode 100644
index f23cb3a..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
deleted file mode 100644
index 0ae4843..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte.S b/runtime/interpreter/mterp/arm/op_iget_byte.S
deleted file mode 100644
index 9c4f37c..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
deleted file mode 100644
index e1b3083..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char.S b/runtime/interpreter/mterp/arm/op_iget_char.S
deleted file mode 100644
index 80c4227..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_char_quick.S b/runtime/interpreter/mterp/arm/op_iget_char_quick.S
deleted file mode 100644
index b44d8f1..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object.S b/runtime/interpreter/mterp/arm/op_iget_object.S
deleted file mode 100644
index e30b129..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_object_quick.S b/runtime/interpreter/mterp/arm/op_iget_object_quick.S
deleted file mode 100644
index 16cb118..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_object_quick.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- object we're operating on
- bl artIGetObjectFromMterp @ (obj, offset)
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_quick.S b/runtime/interpreter/mterp/arm/op_iget_quick.S
deleted file mode 100644
index 0eaf364..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"ldr" }
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- $load r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_short.S b/runtime/interpreter/mterp/arm/op_iget_short.S
deleted file mode 100644
index dd6bc99..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_short_quick.S b/runtime/interpreter/mterp/arm/op_iget_short_quick.S
deleted file mode 100644
index 1831b99..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
deleted file mode 100644
index 46e9ec8..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetU64
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpException @ bail out
- CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
deleted file mode 100644
index 5a7177d..0000000
--- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* iget-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH ip, 1 @ ip<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
- CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S
deleted file mode 100644
index 019929e..0000000
--- a/runtime/interpreter/mterp/arm/op_instance_of.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- FETCH r0, 1 @ r0<- CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpInstanceOf @ (index, &obj, method, self)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
- PREFETCH_INST 2
- cmp r1, #0 @ exception pending?
- bne MterpException
- ADVANCE 2 @ advance rPC
- SET_VREG r0, r9 @ vA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_int_to_byte.S b/runtime/interpreter/mterp/arm/op_int_to_byte.S
deleted file mode 100644
index 059d5c2..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"sxtb r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_char.S b/runtime/interpreter/mterp/arm/op_int_to_char.S
deleted file mode 100644
index 83a0c19..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"uxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_double.S b/runtime/interpreter/mterp/arm/op_int_to_double.S
deleted file mode 100644
index 810c2e4..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funopWider.S" {"instr":"fsitod d0, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_float.S b/runtime/interpreter/mterp/arm/op_int_to_float.S
deleted file mode 100644
index f41654c..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/funop.S" {"instr":"fsitos s1, s0"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_long.S b/runtime/interpreter/mterp/arm/op_int_to_long.S
deleted file mode 100644
index b5aed8e..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWider.S" {"instr":"mov r1, r0, asr #31"}
diff --git a/runtime/interpreter/mterp/arm/op_int_to_short.S b/runtime/interpreter/mterp/arm/op_int_to_short.S
deleted file mode 100644
index 717bd96..0000000
--- a/runtime/interpreter/mterp/arm/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"sxth r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom.S b/runtime/interpreter/mterp/arm/op_invoke_custom.S
deleted file mode 100644
index 2af875c..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_custom.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeCustom" }
- /*
- * Handle an invoke-custom invocation.
- *
- * for: invoke-custom, invoke-custom/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
deleted file mode 100644
index 32575c4..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct.S b/runtime/interpreter/mterp/arm/op_invoke_direct.S
deleted file mode 100644
index 1edf221..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
deleted file mode 100644
index 3097b8e..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface.S b/runtime/interpreter/mterp/arm/op_invoke_interface.S
deleted file mode 100644
index f6d565b..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeInterface" }
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
deleted file mode 100644
index c8443b0..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
deleted file mode 100644
index 816a7ae..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
deleted file mode 100644
index 2541c27..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static.S b/runtime/interpreter/mterp/arm/op_invoke_static.S
deleted file mode 100644
index c3cefcf..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/arm/op_invoke_static_range.S b/runtime/interpreter/mterp/arm/op_invoke_static_range.S
deleted file mode 100644
index dd60d7b..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super.S b/runtime/interpreter/mterp/arm/op_invoke_super.S
deleted file mode 100644
index 92ef2a4..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeSuper" }
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_super_range.S b/runtime/interpreter/mterp/arm/op_invoke_super_range.S
deleted file mode 100644
index 9e4fb1c..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual.S b/runtime/interpreter/mterp/arm/op_invoke_virtual.S
deleted file mode 100644
index 5b893ff..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtual" }
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
deleted file mode 100644
index 020e8b8..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
deleted file mode 100644
index 2b42a78..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 42f2ded..0000000
--- a/runtime/interpreter/mterp/arm/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm/op_iput.S b/runtime/interpreter/mterp/arm/op_iput.S
deleted file mode 100644
index a16795d..0000000
--- a/runtime/interpreter/mterp/arm/op_iput.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern $helper
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl $helper
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean.S b/runtime/interpreter/mterp/arm/op_iput_boolean.S
deleted file mode 100644
index 57edadd..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
deleted file mode 100644
index f0a2777..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte.S b/runtime/interpreter/mterp/arm/op_iput_byte.S
deleted file mode 100644
index ab283b9..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
deleted file mode 100644
index f0a2777..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char.S b/runtime/interpreter/mterp/arm/op_iput_char.S
deleted file mode 100644
index 0fe5d96..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_char_quick.S b/runtime/interpreter/mterp/arm/op_iput_char_quick.S
deleted file mode 100644
index 5212fc3..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_object.S b/runtime/interpreter/mterp/arm/op_iput_object.S
deleted file mode 100644
index 4f401eb..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_object.S
+++ /dev/null
@@ -1,11 +0,0 @@
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpIPutObj
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_object_quick.S b/runtime/interpreter/mterp/arm/op_iput_object_quick.S
deleted file mode 100644
index 876b3da..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_object_quick.S
+++ /dev/null
@@ -1,10 +0,0 @@
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpIputObjectQuick
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_quick.S b/runtime/interpreter/mterp/arm/op_iput_quick.S
deleted file mode 100644
index 98c8150..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "store":"str" }
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- $store r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_short.S b/runtime/interpreter/mterp/arm/op_iput_short.S
deleted file mode 100644
index cc98363..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_short_quick.S b/runtime/interpreter/mterp/arm/op_iput_short_quick.S
deleted file mode 100644
index 5212fc3..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S
deleted file mode 100644
index 6a41473..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU64
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
deleted file mode 100644
index 88e6ea1..0000000
--- a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* iput-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r3, 1 @ r3<- field byte offset
- GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
- cmp r2, #0 @ check object for null
- beq common_errNullObject @ object was null
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
- ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strd r0, [r2, r3] @ obj.field<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S
deleted file mode 100644
index cac12d4..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_double.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {}
- /*
- * Specialised 64-bit floating point operation.
- *
- * Note: The result will be returned in d2.
- *
- * For: long-to-double
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- vldr d0, [r3] @ d0<- vAA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
- vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
- vldr d3, constval$opcode
- vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
-
- GET_INST_OPCODE ip @ extract opcode from rINST
- vstr.64 d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
- /* literal pool helper */
-constval${opcode}:
- .8byte 0x41f0000000000000
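
Illustrative aside (not part of the patch): the op_long_to_double handler deleted above evaluates (double)v as (double)high(v) * 2^32 + (double)unsigned_low(v), with 2^32 supplied by the 0x41f0000000000000 literal. A minimal Python sketch of that decomposition, with illustrative names only:

import struct

# 2**32 encoded as an IEEE-754 double: the literal-pool constant above.
TWO_POW_32 = struct.unpack('>d', (0x41f0000000000000).to_bytes(8, 'big'))[0]
assert TWO_POW_32 == 2.0 ** 32

def long_to_double(v):
    # v is a signed 64-bit value held as a Python int (illustrative helper).
    lo = v & 0xFFFFFFFF      # low word, converted as unsigned (vcvt.f64.u32)
    hi = v >> 32             # high word, converted as signed (vcvt.f64.s32)
    return float(hi) * TWO_POW_32 + float(lo)   # the vmla.f64 combine step

assert long_to_double(-1) == -1.0
assert long_to_double(3 << 40) == float(3 << 40)
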
diff --git a/runtime/interpreter/mterp/arm/op_long_to_float.S b/runtime/interpreter/mterp/arm/op_long_to_float.S
deleted file mode 100644
index efa5a66..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopNarrower.S" {"instr":"bl __aeabi_l2f"}
diff --git a/runtime/interpreter/mterp/arm/op_long_to_int.S b/runtime/interpreter/mterp/arm/op_long_to_int.S
deleted file mode 100644
index 3e91f23..0000000
--- a/runtime/interpreter/mterp/arm/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "arm/op_move.S"
diff --git a/runtime/interpreter/mterp/arm/op_monitor_enter.S b/runtime/interpreter/mterp/arm/op_monitor_enter.S
deleted file mode 100644
index 3c34f75..0000000
--- a/runtime/interpreter/mterp/arm/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artLockObjectFromCode
- cmp r0, #0
- bne MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_monitor_exit.S b/runtime/interpreter/mterp/arm/op_monitor_exit.S
deleted file mode 100644
index fc7cef5..0000000
--- a/runtime/interpreter/mterp/arm/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
- cmp r0, #0 @ failed?
- bne MterpException
- FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move.S b/runtime/interpreter/mterp/arm/op_move.S
deleted file mode 100644
index dfecc24..0000000
--- a/runtime/interpreter/mterp/arm/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_16.S b/runtime/interpreter/mterp/arm/op_move_16.S
deleted file mode 100644
index 78138a2..0000000
--- a/runtime/interpreter/mterp/arm/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_exception.S b/runtime/interpreter/mterp/arm/op_move_exception.S
deleted file mode 100644
index 0242e26..0000000
--- a/runtime/interpreter/mterp/arm/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-exception vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r1, #0 @ r1<- 0
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_from16.S b/runtime/interpreter/mterp/arm/op_move_from16.S
deleted file mode 100644
index 3e79417..0000000
--- a/runtime/interpreter/mterp/arm/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_object.S b/runtime/interpreter/mterp/arm/op_move_object.S
deleted file mode 100644
index 16de57b..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_16.S b/runtime/interpreter/mterp/arm/op_move_object_16.S
deleted file mode 100644
index 2534300..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_object_from16.S b/runtime/interpreter/mterp/arm/op_move_object_from16.S
deleted file mode 100644
index 9e0cf02..0000000
--- a/runtime/interpreter/mterp/arm/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result.S b/runtime/interpreter/mterp/arm/op_move_result.S
deleted file mode 100644
index f2586a0..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_object.S b/runtime/interpreter/mterp/arm/op_move_result_object.S
deleted file mode 100644
index 643296a..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
deleted file mode 100644
index 87929ea..0000000
--- a/runtime/interpreter/mterp/arm/op_move_result_wide.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /* move-result-wide vAA */
- mov rINST, rINST, lsr #8 @ rINST<- AA
- ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- retval.j
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
deleted file mode 100644
index ff353ea..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
deleted file mode 100644
index 9812b66..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 2 @ r3<- BBBB
- FETCH r2, 1 @ r2<- AAAA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR lr, r2 @ lr<- &fp[AAAA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
deleted file mode 100644
index d2cc60c..0000000
--- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 1 @ r3<- BBBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_mul_double.S b/runtime/interpreter/mterp/arm/op_mul_double.S
deleted file mode 100644
index 530e85a..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
deleted file mode 100644
index da1abc6..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float.S b/runtime/interpreter/mterp/arm/op_mul_float.S
deleted file mode 100644
index 6a72e6f..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
deleted file mode 100644
index edb5101..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int.S b/runtime/interpreter/mterp/arm/op_mul_int.S
deleted file mode 100644
index d6151d4..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binop.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
deleted file mode 100644
index 66a797d..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binop2addr.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
deleted file mode 100644
index 4e40c43..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_lit16.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binopLit16.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
deleted file mode 100644
index dbafae9..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_int_lit8.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-%include "arm/binopLit8.S" {"instr":"mul r0, r1, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
deleted file mode 100644
index 4f55280..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_long.S
+++ /dev/null
@@ -1,37 +0,0 @@
- /*
- * Signed 64-bit integer multiply.
- *
- * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
- * WX
- * x YZ
- * --------
- * ZW ZX
- * YW YX
- *
- * The low word of the result holds ZX, the high word holds
- * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
- * it doesn't fit in the low 64 bits.
- *
- * Unlike most ARM math operations, multiply instructions have
- * restrictions on using the same register more than once (Rd and Rm
- * cannot be the same).
- */
- /* mul-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST, lsr #8 @ r0<- AA
- add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
- CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
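
Illustrative aside (not part of the patch): the comment block in the deleted op_mul_long builds the 64-bit product from 32-bit halves: the low word is low(Z*X), the high word is Z*W + Y*X plus the high half of Z*X. A small Python sketch using the same letter names, purely for illustration:

MASK32 = (1 << 32) - 1
MASK64 = (1 << 64) - 1

def mul_long(vbb, vcc):
    # vBB = W:X and vCC = Y:Z, as in the handler's comment (illustrative helper).
    x, w = vbb & MASK32, (vbb >> 32) & MASK32
    z, y = vcc & MASK32, (vcc >> 32) & MASK32
    zx = z * x                                   # umull: full 64-bit partial product
    hi = (z * w + y * x + (zx >> 32)) & MASK32   # mul + mla + add of the carry-out
    lo = zx & MASK32
    return (hi << 32) | lo

a, b = 0x123456789ABCDEF0, 0xFEDCBA9876543210
assert mul_long(a, b) == (a * b) & MASK64        # matches a truncating 64-bit multiply
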
diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
deleted file mode 100644
index 4c1f058..0000000
--- a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S
+++ /dev/null
@@ -1,24 +0,0 @@
- /*
- * Signed 64-bit integer multiply, "/2addr" version.
- *
- * See op_mul_long for an explanation.
- *
- * We get a little tight on registers, so to avoid looking up &fp[A]
- * again we stuff it into rINST.
- */
- /* mul-long/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST @ r0<- &fp[A] (free up rINST)
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_neg_double.S b/runtime/interpreter/mterp/arm/op_neg_double.S
deleted file mode 100644
index 33e609c..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"instr":"add r1, r1, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_float.S b/runtime/interpreter/mterp/arm/op_neg_float.S
deleted file mode 100644
index 993583f..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"add r0, r0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_int.S b/runtime/interpreter/mterp/arm/op_neg_int.S
deleted file mode 100644
index ec0b253..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"rsb r0, r0, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_neg_long.S b/runtime/interpreter/mterp/arm/op_neg_long.S
deleted file mode 100644
index dab2eb4..0000000
--- a/runtime/interpreter/mterp/arm/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"preinstr":"rsbs r0, r0, #0", "instr":"rsc r1, r1, #0"}
diff --git a/runtime/interpreter/mterp/arm/op_new_array.S b/runtime/interpreter/mterp/arm/op_new_array.S
deleted file mode 100644
index 8bb792c..0000000
--- a/runtime/interpreter/mterp/arm/op_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_new_instance.S b/runtime/interpreter/mterp/arm/op_new_instance.S
deleted file mode 100644
index 95d4be8..0000000
--- a/runtime/interpreter/mterp/arm/op_new_instance.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rSELF
- mov r2, rINST
- bl MterpNewInstance @ (shadow_frame, self, inst_data)
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_nop.S b/runtime/interpreter/mterp/arm/op_nop.S
deleted file mode 100644
index af0f88f..0000000
--- a/runtime/interpreter/mterp/arm/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
- FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- GOTO_OPCODE ip @ execute it
diff --git a/runtime/interpreter/mterp/arm/op_not_int.S b/runtime/interpreter/mterp/arm/op_not_int.S
deleted file mode 100644
index 816485a..0000000
--- a/runtime/interpreter/mterp/arm/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unop.S" {"instr":"mvn r0, r0"}
diff --git a/runtime/interpreter/mterp/arm/op_not_long.S b/runtime/interpreter/mterp/arm/op_not_long.S
deleted file mode 100644
index 49a5905..0000000
--- a/runtime/interpreter/mterp/arm/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unopWide.S" {"preinstr":"mvn r0, r0", "instr":"mvn r1, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int.S b/runtime/interpreter/mterp/arm/op_or_int.S
deleted file mode 100644
index b046e8d..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_2addr.S b/runtime/interpreter/mterp/arm/op_or_int_2addr.S
deleted file mode 100644
index 493c59f..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit16.S b/runtime/interpreter/mterp/arm/op_or_int_lit16.S
deleted file mode 100644
index 0a01db8..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"orr r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_or_int_lit8.S b/runtime/interpreter/mterp/arm/op_or_int_lit8.S
deleted file mode 100644
index 9882bfc..0000000
--- a/runtime/interpreter/mterp/arm/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"orr r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long.S b/runtime/interpreter/mterp/arm/op_or_long.S
deleted file mode 100644
index 048c45c..0000000
--- a/runtime/interpreter/mterp/arm/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_or_long_2addr.S b/runtime/interpreter/mterp/arm/op_or_long_2addr.S
deleted file mode 100644
index 9395346..0000000
--- a/runtime/interpreter/mterp/arm/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"orr r0, r0, r2", "instr":"orr r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_packed_switch.S b/runtime/interpreter/mterp/arm/op_packed_switch.S
deleted file mode 100644
index 412c58f..0000000
--- a/runtime/interpreter/mterp/arm/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl $func @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/arm/op_rem_double.S b/runtime/interpreter/mterp/arm/op_rem_double.S
deleted file mode 100644
index b539221..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_double.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a double remainder function, but libm does */
-%include "arm/binopWide.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
deleted file mode 100644
index 372ef1d..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_double_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a double remainder function, but libm does */
-%include "arm/binopWide2addr.S" {"instr":"bl fmod"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float.S b/runtime/interpreter/mterp/arm/op_rem_float.S
deleted file mode 100644
index 7bd10de..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_float.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm/binop.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
deleted file mode 100644
index 93c5fae..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_float_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm/binop2addr.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_int.S b/runtime/interpreter/mterp/arm/op_rem_int.S
deleted file mode 100644
index ff62573..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
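
A note for readers, not part of the patch: both the sdiv/mls pair and __aeabi_idivmod yield the remainder of truncating division (quotient rounded toward zero), which is what rem-int requires; Python's % floors instead. A sketch with illustrative names:

def rem_int(vbb, vcc):
    # Remainder as computed by "sdiv r2, r0, r1; mls r1, r1, r2, r0".
    if vcc == 0:
        raise ZeroDivisionError   # the handler branches to common_errDivideByZero
    quot = int(vbb / vcc)         # rounds toward zero; adequate for 32-bit operands
    return vbb - quot * vcc

assert rem_int(7, -3) == 1        # Python's 7 % -3 would give -2
assert rem_int(-7, 3) == -1
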
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
deleted file mode 100644
index ba5751a..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_2addr.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
deleted file mode 100644
index 4edb187..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_lit16.S
+++ /dev/null
@@ -1,31 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
deleted file mode 100644
index 3888361..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_int_lit8.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between sdiv block or the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm/op_rem_long.S b/runtime/interpreter/mterp/arm/op_rem_long.S
deleted file mode 100644
index b2b1c24..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_long.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
deleted file mode 100644
index f87d493..0000000
--- a/runtime/interpreter/mterp/arm/op_rem_long_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "result0":"r2", "result1":"r3", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm/op_return.S b/runtime/interpreter/mterp/arm/op_return.S
deleted file mode 100644
index f9c0f0f..0000000
--- a/runtime/interpreter/mterp/arm/op_return.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_object.S b/runtime/interpreter/mterp/arm/op_return_object.S
deleted file mode 100644
index c490730..0000000
--- a/runtime/interpreter/mterp/arm/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_return.S"
diff --git a/runtime/interpreter/mterp/arm/op_return_void.S b/runtime/interpreter/mterp/arm/op_return_void.S
deleted file mode 100644
index a91ccb3..0000000
--- a/runtime/interpreter/mterp/arm/op_return_void.S
+++ /dev/null
@@ -1,9 +0,0 @@
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
deleted file mode 100644
index b953f4c..0000000
--- a/runtime/interpreter/mterp/arm/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,7 +0,0 @@
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S
deleted file mode 100644
index df582c0..0000000
--- a/runtime/interpreter/mterp/arm/op_return_wide.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
- b MterpReturn
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int.S b/runtime/interpreter/mterp/arm/op_rsub_int.S
deleted file mode 100644
index 1508dd4..0000000
--- a/runtime/interpreter/mterp/arm/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "arm/binopLit16.S" {"instr":"rsb r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
deleted file mode 100644
index dc953dc..0000000
--- a/runtime/interpreter/mterp/arm/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"rsb r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget.S b/runtime/interpreter/mterp/arm/op_sget.S
deleted file mode 100644
index 575a8c0..0000000
--- a/runtime/interpreter/mterp/arm/op_sget.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern $helper
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl $helper
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if $is_object
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm/op_sget_boolean.S b/runtime/interpreter/mterp/arm/op_sget_boolean.S
deleted file mode 100644
index df1a024..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_byte.S b/runtime/interpreter/mterp/arm/op_sget_byte.S
deleted file mode 100644
index 8ad3ff0..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_char.S b/runtime/interpreter/mterp/arm/op_sget_char.S
deleted file mode 100644
index 5239514..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_object.S b/runtime/interpreter/mterp/arm/op_sget_object.S
deleted file mode 100644
index e61a5a7..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_short.S b/runtime/interpreter/mterp/arm/op_sget_short.S
deleted file mode 100644
index 49493eb..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
deleted file mode 100644
index 5981ec4..0000000
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field@BBBB */
-
- .extern MterpSGetU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetU64
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r9, rINST, lsr #8 @ r9<- AA
- VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, r2, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ vAA/vAA+1<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shl_int.S b/runtime/interpreter/mterp/arm/op_shl_int.S
deleted file mode 100644
index 7e4c768..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
deleted file mode 100644
index 4286577..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
deleted file mode 100644
index 60a1498..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, asl r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S
deleted file mode 100644
index 82ec6ed..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
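
Illustrative aside (not part of the patch): the deleted shl-long masks the shift distance to 6 bits, builds the high word from both halves, and patches it via the movpl path when the distance is 32 or more (an ARM register-specified shift of 32 or greater yields zero, which the sketch models explicitly). Python, illustrative names only:

MASK32 = (1 << 32) - 1

def shl_long(lo, hi, dist):
    # lo/hi are the 32-bit halves of the 64-bit value (illustrative helper).
    s = dist & 63                                          # and r2, r2, #63
    new_hi = ((hi << s) | (lo >> (32 - s) if s else 0)) & MASK32
    if s >= 32:                                            # subs/movpl fixup
        new_hi = (lo << (s - 32)) & MASK32
    new_lo = (lo << s) & MASK32
    return new_lo, new_hi

assert shl_long(0x80000001, 0x00000002, 1) == (0x00000002, 0x00000005)
assert shl_long(0x80000001, 0x00000002, 33) == (0x00000000, 0x00000002)
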
diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
deleted file mode 100644
index f361a7d..0000000
--- a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_int.S b/runtime/interpreter/mterp/arm/op_shr_int.S
deleted file mode 100644
index 6317605..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
deleted file mode 100644
index cc8632f..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
deleted file mode 100644
index c2f6cb0..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, asr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S
deleted file mode 100644
index a0afe5b..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r0 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
deleted file mode 100644
index 976110e..0000000
--- a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r0 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sparse_switch.S b/runtime/interpreter/mterp/arm/op_sparse_switch.S
deleted file mode 100644
index 9f7a42b..0000000
--- a/runtime/interpreter/mterp/arm/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm/op_sput.S b/runtime/interpreter/mterp/arm/op_sput.S
deleted file mode 100644
index c4a8978..0000000
--- a/runtime/interpreter/mterp/arm/op_sput.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"MterpSPutU32"}
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl $helper
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sput_boolean.S b/runtime/interpreter/mterp/arm/op_sput_boolean.S
deleted file mode 100644
index 0c37623..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_byte.S b/runtime/interpreter/mterp/arm/op_sput_byte.S
deleted file mode 100644
index 8d4e754..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_char.S b/runtime/interpreter/mterp/arm/op_sput_char.S
deleted file mode 100644
index 442b56f..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_object.S b/runtime/interpreter/mterp/arm/op_sput_object.S
deleted file mode 100644
index c58918f..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_object.S
+++ /dev/null
@@ -1,11 +0,0 @@
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpSPutObj
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sput_short.S b/runtime/interpreter/mterp/arm/op_sput_short.S
deleted file mode 100644
index 0eb533f..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S
deleted file mode 100644
index 0ed4017..0000000
--- a/runtime/interpreter/mterp/arm/op_sput_wide.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutU64
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_sub_double.S b/runtime/interpreter/mterp/arm/op_sub_double.S
deleted file mode 100644
index 69bcc67..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
deleted file mode 100644
index 2ea59fe..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinopWide2addr.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float.S b/runtime/interpreter/mterp/arm/op_sub_float.S
deleted file mode 100644
index 3f17a0d..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
deleted file mode 100644
index 2f4aac4..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/fbinop2addr.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int.S b/runtime/interpreter/mterp/arm/op_sub_int.S
deleted file mode 100644
index efb9e10..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
deleted file mode 100644
index 4d3036b..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"sub r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long.S b/runtime/interpreter/mterp/arm/op_sub_long.S
deleted file mode 100644
index 6f1eb6e..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
deleted file mode 100644
index 8e9da05..0000000
--- a/runtime/interpreter/mterp/arm/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"subs r0, r0, r2", "instr":"sbc r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_throw.S b/runtime/interpreter/mterp/arm/op_throw.S
deleted file mode 100644
index be49ada..0000000
--- a/runtime/interpreter/mterp/arm/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r1, r2 @ r1<- vAA (exception object)
- cmp r1, #0 @ null object?
- beq common_errNullObject @ yes, throw an NPE instead
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/arm/op_unused_3e.S b/runtime/interpreter/mterp/arm/op_unused_3e.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_3f.S b/runtime/interpreter/mterp/arm/op_unused_3f.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_40.S b/runtime/interpreter/mterp/arm/op_unused_40.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_41.S b/runtime/interpreter/mterp/arm/op_unused_41.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_42.S b/runtime/interpreter/mterp/arm/op_unused_42.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_43.S b/runtime/interpreter/mterp/arm/op_unused_43.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_73.S b/runtime/interpreter/mterp/arm/op_unused_73.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_79.S b/runtime/interpreter/mterp/arm/op_unused_79.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_7a.S b/runtime/interpreter/mterp/arm/op_unused_7a.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f3.S b/runtime/interpreter/mterp/arm/op_unused_f3.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f4.S b/runtime/interpreter/mterp/arm/op_unused_f4.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f5.S b/runtime/interpreter/mterp/arm/op_unused_f5.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f6.S b/runtime/interpreter/mterp/arm/op_unused_f6.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f7.S b/runtime/interpreter/mterp/arm/op_unused_f7.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f8.S b/runtime/interpreter/mterp/arm/op_unused_f8.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_f9.S b/runtime/interpreter/mterp/arm/op_unused_f9.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fc.S b/runtime/interpreter/mterp/arm/op_unused_fc.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_unused_fd.S b/runtime/interpreter/mterp/arm/op_unused_fd.S
deleted file mode 100644
index 10948dc..0000000
--- a/runtime/interpreter/mterp/arm/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/unused.S"
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int.S b/runtime/interpreter/mterp/arm/op_ushr_int.S
deleted file mode 100644
index a74361b..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
deleted file mode 100644
index f2d1d13..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"preinstr":"and r1, r1, #31", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
deleted file mode 100644
index 5554eb0..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"ubfx r1, r3, #8, #5", "instr":"mov r0, r0, lsr r1"}
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S
deleted file mode 100644
index c817bc9..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r0<- r0 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
deleted file mode 100644
index 2735f87..0000000
--- a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r2 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
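
The two ushr-long handlers above compose a 64-bit logical right shift out of 32-bit register operations: the low word is shifted right, the vacated bits are or-ed in from the high word, and when the (masked) shift distance is 32 or more the low word is taken directly from the high word. A small C sketch of the same decomposition, written as my own illustration rather than code from the runtime:

#include <stdint.h>

/* Mirrors the lo/hi decomposition used by op_ushr_long above; the shift
 * distance is masked to the low 6 bits as required by the Dalvik spec. */
static uint64_t ushr_long(uint32_t lo, uint32_t hi, uint32_t shift) {
    uint32_t s = shift & 63;                    /* and r2, r2, #63 */
    uint32_t out_lo, out_hi;
    if (s == 0) {
        out_lo = lo;                            /* register shifts by 0 are no-ops */
        out_hi = hi;
    } else if (s < 32) {
        out_lo = (lo >> s) | (hi << (32 - s));  /* orr r0, r0, r1, asl r3 */
        out_hi = hi >> s;                       /* mov r1, r1, lsr r2 */
    } else {
        out_lo = hi >> (s - 32);                /* movpl r0, r1, lsr ip */
        out_hi = 0;                             /* lsr by >= 32 yields 0 */
    }
    return ((uint64_t)out_hi << 32) | out_lo;
}
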
diff --git a/runtime/interpreter/mterp/arm/op_xor_int.S b/runtime/interpreter/mterp/arm/op_xor_int.S
deleted file mode 100644
index fd7a4b7..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
deleted file mode 100644
index 196a665..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binop2addr.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
deleted file mode 100644
index 39f2a47..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit16.S" {"instr":"eor r0, r0, r1"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
deleted file mode 100644
index 97d0b9e..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopLit8.S" {"extract":"", "instr":"eor r0, r0, r3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long.S b/runtime/interpreter/mterp/arm/op_xor_long.S
deleted file mode 100644
index 4f830d0..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
deleted file mode 100644
index 5b5ed88..0000000
--- a/runtime/interpreter/mterp/arm/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm/binopWide2addr.S" {"preinstr":"eor r0, r0, r2", "instr":"eor r1, r1, r3"}
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
new file mode 100644
index 0000000..340038c
--- /dev/null
+++ b/runtime/interpreter/mterp/arm/other.S
@@ -0,0 +1,379 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- BBBB
+ mov r1, rINST, lsr #8 @ r1<- AA
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl $helper @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 @ load rINST
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 2 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+%def op_const():
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_16():
+ /* const/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_4():
+ /* const/4 vA, #+B */
+ sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
+ ubfx r0, rINST, #8, #4 @ r0<- A
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ SET_VREG r1, r0 @ fp[A]<- r1
+ GOTO_OPCODE ip @ execute next instruction
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ SET_VREG r0, r3 @ vAA<- r0
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r2, 2 @ r2<- BBBB (high)
+ mov r1, rINST, lsr #8 @ r1<- AA
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r2, rFP, #OFF_FP_SHADOWFRAME
+ mov r3, rSELF
+ bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 @ advance rPC
+ cmp r0, #0 @ fail?
+ bne MterpPossibleException @ let reference interpreter deal with it.
+ ADVANCE 3 @ advance rPC
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_wide():
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH r0, 1 @ r0<- bbbb (low)
+ FETCH r1, 2 @ r1<- BBBB (low middle)
+ FETCH r2, 3 @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH r3, 4 @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
+ FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
+ VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH r0, 1 @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S r2, 2 @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_monitor_enter():
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artLockObjectFromCode
+ cmp r0, #0
+ bne MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_monitor_exit():
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG r0, r2 @ r0<- vAA (object)
+ mov r1, rSELF @ r1<- self
+ bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ bne MterpException
+ FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[B]
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[A]<- r2
+ .endif
+ GOTO_OPCODE ip @ execute next instruction
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH r1, 2 @ r1<- BBBB
+ FETCH r0, 1 @ r0<- AAAA
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AAAA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_exception():
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH r1, 1 @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_VREG r2, r1 @ r2<- fp[BBBB]
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
+ .else
+ SET_VREG r2, r0 @ fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
+ ldr r0, [r0] @ r0 <- result.i.
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
+ .else
+ SET_VREG r0, r2 @ fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* move-result-wide vAA */
+ mov rINST, rINST, lsr #8 @ rINST<- AA
+ ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r3, rINST, lsr #12 @ r3<- B
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
+ FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 2 @ r3<- BBBB
+ FETCH r2, 1 @ r2<- AAAA
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR lr, r2 @ lr<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
+ stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH r3, 1 @ r3<- BBBB
+ mov rINST, rINST, lsr #8 @ rINST<- AA
+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
+ VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
+ FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ GET_INST_OPCODE ip @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE ip @ jump to next instruction
+
+%def op_nop():
+ FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
+ GET_INST_OPCODE ip @ ip<- opcode from rINST
+ GOTO_OPCODE ip @ execute it
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_73():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
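
The new other.S collapses the per-opcode const handlers into one parameterized template: each %def op_const_* body simply re-invokes const() with a different helper name, and every helper takes the same four arguments spelled out in the handler comment, (index, tgt_reg, shadow_frame, self), with a non-zero return sending control to MterpPossibleException. A C-style sketch of that shared contract follows; the typedef and types are illustrative assumptions, not declarations copied from the runtime.

#include <stdint.h>

typedef struct ShadowFrame ShadowFrame;   /* rFP + OFF_FP_SHADOWFRAME */
typedef struct Thread Thread;             /* rSELF */

/* Assumed shape of the helpers the template is instantiated with
 * (MterpConstClass, MterpConstString, MterpConstMethodHandle, ...):
 * r0 = BBBB index, r1 = destination vreg AA, r2 = shadow frame, r3 = self.
 * Returns 0 on success, non-zero if an exception may be pending. */
typedef int (*ConstHelper)(uint32_t index, uint32_t tgt_reg,
                           ShadowFrame* shadow_frame, Thread* self);

static int handle_const(ConstHelper helper, uint32_t index, uint32_t tgt_reg,
                        ShadowFrame* sf, Thread* self) {
    return helper(index, tgt_reg, sf, self);   /* 0 == success */
}
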
diff --git a/runtime/interpreter/mterp/arm/unop.S b/runtime/interpreter/mterp/arm/unop.S
deleted file mode 100644
index 56518b5..0000000
--- a/runtime/interpreter/mterp/arm/unop.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- $preinstr @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S
deleted file mode 100644
index 2d0453a..0000000
--- a/runtime/interpreter/mterp/arm/unopNarrower.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0/r1", where
- * "result" is a 32-bit quantity in r0.
- *
- * For: long-to-float, double-to-int, double-to-float
- *
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for op_move.)
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
deleted file mode 100644
index cd5defd..0000000
--- a/runtime/interpreter/mterp/arm/unopWide.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $preinstr @ optional op; may set condition codes
- $instr @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
deleted file mode 100644
index 9d50489..0000000
--- a/runtime/interpreter/mterp/arm/unopWider.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- $preinstr @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- $instr @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
diff --git a/runtime/interpreter/mterp/arm/unused.S b/runtime/interpreter/mterp/arm/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/arm/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/arm/zcmp.S b/runtime/interpreter/mterp/arm/zcmp.S
deleted file mode 100644
index 5db8b6c..0000000
--- a/runtime/interpreter/mterp/arm/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vA, 0)
- b${condition} MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/alt_stub.S b/runtime/interpreter/mterp/arm64/alt_stub.S
deleted file mode 100644
index 3a463fe..0000000
--- a/runtime/interpreter/mterp/arm64/alt_stub.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (${opnum} * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
diff --git a/runtime/interpreter/mterp/arm64/arithmetic.S b/runtime/interpreter/mterp/arm64/arithmetic.S
new file mode 100644
index 0000000..cf9dd86
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/arithmetic.S
@@ -0,0 +1,507 @@
+%def binop(preinstr="", result="w0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w9, wINST, #8 // w9<- AA
+ lsr w3, w0, #8 // w3<- CC
+ and w2, w0, #255 // w2<- BB
+ GET_VREG w1, w3 // w1<- vCC
+ GET_VREG w0, w2 // w0<- vBB
+ .if $chkzero
+ cbz w1, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+%def binop2addr(preinstr="", result="w0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w1, w3 // w1<- vB
+ GET_VREG w0, w9 // w0<- vA
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+%def binopLit16(preinstr="", result="w0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
+ lsr w2, wINST, #12 // w2<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG w0, w2 // w0<- vB
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+%def binopLit8(extract="asr w1, w3, #8", preinstr="", result="w0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = w0 op w1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than w0, you can override "result".)
+ *
+ * You can override "extract" if the extraction of the literal value
+ * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
+ * can be omitted completely if the shift is embedded in "instr".
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
+ lsr w9, wINST, #8 // w9<- AA
+ and w2, w3, #255 // w2<- BB
+ GET_VREG w0, w2 // w0<- vBB
+ $extract // optional; typically w1<- ssssssCC (sign extended)
+ .if $chkzero
+ cbz w1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr // optional op; may set condition codes
+ $instr // $result<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG $result, w9 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-12 instructions */
+
+%def binopWide(preinstr="", instr="add x0, x1, x2", result="x0", r1="x1", r2="x2", chkzero="0"):
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = x1 op x2".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than x0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double, rem-double
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE $r2, w2 // w2<- vCC
+ GET_VREG_WIDE $r1, w1 // w1<- vBB
+ .if $chkzero
+ cbz $r2, common_errDivideByZero // is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $preinstr
+ $instr // $result<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE $result, w4 // vAA<- $result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+%def binopWide2addr(preinstr="", instr="add x0, x0, x1", r0="x0", r1="x1", chkzero="0"):
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "x0 = x0 op x1".
+ * This must not be a function call, as we keep w2 live across it.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (w1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE $r1, w1 // x1<- vB
+ GET_VREG_WIDE $r0, w2 // x0<- vA
+ .if $chkzero
+ cbz $r1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $preinstr
+ $instr // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE $r0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+%def shiftWide(opcode="shl"):
+ /*
+ * 64-bit shift operation.
+ *
+ * For: shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w3, wINST, #8 // w3<- AA
+ lsr w2, w0, #8 // w2<- CC
+ GET_VREG w2, w2 // w2<- vCC (shift count)
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE x1, w1 // x1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $opcode x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3 // vAA<- x0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+%def shiftWide2addr(opcode="lsl"):
+ /*
+ * Generic 64-bit shift operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w1, w1 // x1<- vB
+ GET_VREG_WIDE x0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $opcode x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+%def unop(instr=""):
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op w0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ GET_VREG w0, w3 // w0<- vB
+ ubfx w9, wINST, #8, #4 // w9<- A
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $instr // w0<- op, w0-w3 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w9 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+ /* 8-9 instructions */
+
+%def unopWide(instr="sub x0, xzr, x0"):
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op x0".
+ *
+ * For: neg-long, not-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE x0, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $instr
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-11 instructions */
+
+%def op_add_int():
+% binop(instr="add w0, w0, w1")
+
+%def op_add_int_2addr():
+% binop2addr(instr="add w0, w0, w1")
+
+%def op_add_int_lit16():
+% binopLit16(instr="add w0, w0, w1")
+
+%def op_add_int_lit8():
+% binopLit8(extract="", instr="add w0, w0, w3, asr #8")
+
+%def op_add_long():
+% binopWide(instr="add x0, x1, x2")
+
+%def op_add_long_2addr():
+% binopWide2addr(instr="add x0, x0, x1")
+
+%def op_and_int():
+% binop(instr="and w0, w0, w1")
+
+%def op_and_int_2addr():
+% binop2addr(instr="and w0, w0, w1")
+
+%def op_and_int_lit16():
+% binopLit16(instr="and w0, w0, w1")
+
+%def op_and_int_lit8():
+% binopLit8(extract="", instr="and w0, w0, w3, asr #8")
+
+%def op_and_long():
+% binopWide(instr="and x0, x1, x2")
+
+%def op_and_long_2addr():
+% binopWide2addr(instr="and x0, x0, x1")
+
+%def op_cmp_long():
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG_WIDE x1, w2
+ GET_VREG_WIDE x2, w3
+ cmp x1, x2
+ cset w0, ne
+ cneg w0, w0, lt
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG w0, w4
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_div_int():
+% binop(instr="sdiv w0, w0, w1", chkzero="1")
+
+%def op_div_int_2addr():
+% binop2addr(instr="sdiv w0, w0, w1", chkzero="1")
+
+%def op_div_int_lit16():
+% binopLit16(instr="sdiv w0, w0, w1", chkzero="1")
+
+%def op_div_int_lit8():
+% binopLit8(instr="sdiv w0, w0, w1", chkzero="1")
+
+%def op_div_long():
+% binopWide(instr="sdiv x0, x1, x2", chkzero="1")
+
+%def op_div_long_2addr():
+% binopWide2addr(instr="sdiv x0, x0, x1", chkzero="1")
+
+%def op_int_to_byte():
+% unop(instr="sxtb w0, w0")
+
+%def op_int_to_char():
+% unop(instr="uxth w0, w0")
+
+%def op_int_to_long():
+ /* int-to-long vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, w4 // fp[A]<- x0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_int_to_short():
+% unop(instr="sxth w0, w0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
+%def op_mul_int():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+% binop(instr="mul w0, w1, w0")
+
+%def op_mul_int_2addr():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+% binop2addr(instr="mul w0, w1, w0")
+
+%def op_mul_int_lit16():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+% binopLit16(instr="mul w0, w1, w0")
+
+%def op_mul_int_lit8():
+/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
+% binopLit8(instr="mul w0, w1, w0")
+
+%def op_mul_long():
+% binopWide(instr="mul x0, x1, x2")
+
+%def op_mul_long_2addr():
+% binopWide2addr(instr="mul x0, x0, x1")
+
+%def op_neg_int():
+% unop(instr="sub w0, wzr, w0")
+
+%def op_neg_long():
+% unopWide(instr="sub x0, xzr, x0")
+
+%def op_not_int():
+% unop(instr="mvn w0, w0")
+
+%def op_not_long():
+% unopWide(instr="mvn x0, x0")
+
+%def op_or_int():
+% binop(instr="orr w0, w0, w1")
+
+%def op_or_int_2addr():
+% binop2addr(instr="orr w0, w0, w1")
+
+%def op_or_int_lit16():
+% binopLit16(instr="orr w0, w0, w1")
+
+%def op_or_int_lit8():
+% binopLit8(extract="", instr="orr w0, w0, w3, asr #8")
+
+%def op_or_long():
+% binopWide(instr="orr x0, x1, x2")
+
+%def op_or_long_2addr():
+% binopWide2addr(instr="orr x0, x0, x1")
+
+%def op_rem_int():
+% binop(preinstr="sdiv w2, w0, w1", instr="msub w0, w2, w1, w0", chkzero="1")
+
+%def op_rem_int_2addr():
+% binop2addr(preinstr="sdiv w2, w0, w1", instr="msub w0, w2, w1, w0", chkzero="1")
+
+%def op_rem_int_lit16():
+% binopLit16(preinstr="sdiv w3, w0, w1", instr="msub w0, w3, w1, w0", chkzero="1")
+
+%def op_rem_int_lit8():
+% binopLit8(preinstr="sdiv w3, w0, w1", instr="msub w0, w3, w1, w0", chkzero="1")
+
+%def op_rem_long():
+% binopWide(preinstr="sdiv x3, x1, x2", instr="msub x0, x3, x2, x1", chkzero="1")
+
+%def op_rem_long_2addr():
+% binopWide2addr(preinstr="sdiv x3, x0, x1", instr="msub x0, x3, x1, x0", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+% binopLit16(instr="sub w0, w1, w0")
+
+%def op_rsub_int_lit8():
+% binopLit8(instr="sub w0, w1, w0")
+
+%def op_shl_int():
+% binop(instr="lsl w0, w0, w1")
+
+%def op_shl_int_2addr():
+% binop2addr(instr="lsl w0, w0, w1")
+
+%def op_shl_int_lit8():
+% binopLit8(extract="ubfx w1, w3, #8, #5", instr="lsl w0, w0, w1")
+
+%def op_shl_long():
+% shiftWide(opcode="lsl")
+
+%def op_shl_long_2addr():
+% shiftWide2addr(opcode="lsl")
+
+%def op_shr_int():
+% binop(instr="asr w0, w0, w1")
+
+%def op_shr_int_2addr():
+% binop2addr(instr="asr w0, w0, w1")
+
+%def op_shr_int_lit8():
+% binopLit8(extract="ubfx w1, w3, #8, #5", instr="asr w0, w0, w1")
+
+%def op_shr_long():
+% shiftWide(opcode="asr")
+
+%def op_shr_long_2addr():
+% shiftWide2addr(opcode="asr")
+
+%def op_sub_int():
+% binop(instr="sub w0, w0, w1")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="sub w0, w0, w1")
+
+%def op_sub_long():
+% binopWide(instr="sub x0, x1, x2")
+
+%def op_sub_long_2addr():
+% binopWide2addr(instr="sub x0, x0, x1")
+
+%def op_ushr_int():
+% binop(instr="lsr w0, w0, w1")
+
+%def op_ushr_int_2addr():
+% binop2addr(instr="lsr w0, w0, w1")
+
+%def op_ushr_int_lit8():
+% binopLit8(extract="ubfx w1, w3, #8, #5", instr="lsr w0, w0, w1")
+
+%def op_ushr_long():
+% shiftWide(opcode="lsr")
+
+%def op_ushr_long_2addr():
+% shiftWide2addr(opcode="lsr")
+
+%def op_xor_int():
+% binop(instr="eor w0, w0, w1")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="eor w0, w0, w1")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="eor w0, w0, w1")
+
+%def op_xor_int_lit8():
+% binopLit8(extract="", instr="eor w0, w0, w3, asr #8")
+
+%def op_xor_long():
+% binopWide(instr="eor x0, x1, x2")
+
+%def op_xor_long_2addr():
+% binopWide2addr(instr="eor x0, x0, x1")
diff --git a/runtime/interpreter/mterp/arm64/array.S b/runtime/interpreter/mterp/arm64/array.S
new file mode 100644
index 0000000..a023d22
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/array.S
@@ -0,0 +1,235 @@
+%def op_aget(load="ldr", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz x0, common_errNullObject // bail if null array object.
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, uxtw #$shift // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $load w2, [x0, #$data_offset] // w2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w2, w9 // vAA<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_aget_boolean():
+% op_aget(load="ldrb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="ldrsb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="ldrh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ FETCH_B w3, 1, 1 // w3<- CC
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ bl artAGetObjectFromMterp // (array, index)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ lsr w2, wINST, #8 // w2<- AA
+ PREFETCH_INST 2
+ cbnz w1, MterpException
+ SET_VREG_OBJECT w0, w2
+ ADVANCE 2
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_aget_short():
+% op_aget(load="ldrsh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // yes, bail
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x2, w4
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B w2, 1, 0 // w2<- BB
+ lsr w9, wINST, #8 // w9<- AA
+ FETCH_B w3, 1, 1 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #$shift // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_VREG w2, w9 // w2<- vAA
+ GET_INST_OPCODE ip // extract opcode from rINST
+ $store w2, [x0, #$data_offset] // vBB[vCC]<- w2
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_aput_boolean():
+% op_aput(store="strb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(store="strb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(store="strh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpAputObject
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_aput_short():
+% op_aput(store="strh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG w0, w2 // w0<- vBB (array object)
+ GET_VREG w1, w3 // w1<- vCC (requested index)
+ cbz w0, common_errNullObject // bail if null
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
+ add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
+ cmp w1, w3 // compare unsigned index, length
+ bcs common_errArrayIndex // index >= length, bail
+ GET_VREG_WIDE x1, w4
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_array_length():
+ /*
+ * Return the length of an array.
+ */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG w0, w1 // w0<- vB (object ref)
+ cbz w0, common_errNullObject // yup, fail
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w3, w2 // vB<- length
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ FETCH w0, 1 // x0<- 000000000000bbbb (lo)
+ FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr x1, x0, x1, lsl #16 // x1<- ssssssssBBBBbbbb
+ GET_VREG w0, w3 // w0<- vAA (array object)
+ add x1, xPC, x1, lsl #1 // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
+ bl MterpFillArrayData // (obj, payload)
+ cbz w0, MterpPossibleException // exception?
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern $helper
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov x2, xSELF
+ bl $helper
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ mov x3, xSELF
+ bl MterpNewArray
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
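
The aget/aput templates above perform the whole bounds check with a single unsigned comparison: after the null check, cmp w1, w3 followed by bcs rejects both too-large and negative indices, because a negative index reinterpreted as unsigned exceeds any valid array length. A C sketch of the same check, illustration only — the struct layout stands in for the mirror array object whose real offsets come from the MIRROR_*_OFFSET constants:

#include <stddef.h>
#include <stdint.h>

/* Minimal stand-in for the array layout implied by the handlers above:
 * a 32-bit length field followed by the element data. */
typedef struct {
    int32_t length;
    int32_t data[];
} IntArray;

/* Returns 0 and writes *out on success; non-zero selects the error path
 * (common_errNullObject / common_errArrayIndex in the handlers). */
static int aget_int(const IntArray* arr, int32_t index, int32_t* out) {
    if (arr == NULL) {
        return 1;                                /* cbz w0, common_errNullObject */
    }
    if ((uint32_t)index >= (uint32_t)arr->length) {
        return 2;                                /* cmp w1, w3; bcs common_errArrayIndex */
    }
    *out = arr->data[index];                     /* ldr w2, [x0, #data_offset] */
    return 0;
}
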
diff --git a/runtime/interpreter/mterp/arm64/bincmp.S b/runtime/interpreter/mterp/arm64/bincmp.S
deleted file mode 100644
index 8dd4fed..0000000
--- a/runtime/interpreter/mterp/arm64/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.${condition} MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/binop.S b/runtime/interpreter/mterp/arm64/binop.S
deleted file mode 100644
index b629b0b..0000000
--- a/runtime/interpreter/mterp/arm64/binop.S
+++ /dev/null
@@ -1,33 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if $chkzero
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $preinstr // optional op; may set condition codes
- $instr // $result<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG $result, w9 // vAA<- $result
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binop2addr.S b/runtime/interpreter/mterp/arm64/binop2addr.S
deleted file mode 100644
index a480a7d..0000000
--- a/runtime/interpreter/mterp/arm64/binop2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if $chkzero
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- $preinstr // optional op; may set condition codes
- $instr // $result<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG $result, w9 // vAA<- $result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit16.S b/runtime/interpreter/mterp/arm64/binopLit16.S
deleted file mode 100644
index 4f9d205..0000000
--- a/runtime/interpreter/mterp/arm64/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"w0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if $chkzero
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $preinstr
- $instr // $result<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG $result, w9 // vAA<- $result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopLit8.S b/runtime/interpreter/mterp/arm64/binopLit8.S
deleted file mode 100644
index dfa3169..0000000
--- a/runtime/interpreter/mterp/arm64/binopLit8.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"extract": "asr w1, w3, #8", "preinstr":"", "result":"w0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- $extract // optional; typically w1<- ssssssCC (sign extended)
- .if $chkzero
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $preinstr // optional op; may set condition codes
- $instr // $result<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG $result, w9 // vAA<- $result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide.S b/runtime/interpreter/mterp/arm64/binopWide.S
deleted file mode 100644
index 9de24f1..0000000
--- a/runtime/interpreter/mterp/arm64/binopWide.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "instr":"add x0, x1, x2", "result":"x0", "r1":"x1", "r2":"x2", "chkzero":"0"}
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE $r2, w2 // w2<- vCC
- GET_VREG_WIDE $r1, w1 // w1<- vBB
- .if $chkzero
- cbz $r2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $preinstr
- $instr // $result<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE $result, w4 // vAA<- $result
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/binopWide2addr.S b/runtime/interpreter/mterp/arm64/binopWide2addr.S
deleted file mode 100644
index d9927a2..0000000
--- a/runtime/interpreter/mterp/arm64/binopWide2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"preinstr":"", "instr":"add x0, x0, x1", "r0":"x0", "r1":"x1", "chkzero":"0"}
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE $r1, w1 // x1<- vB
- GET_VREG_WIDE $r0, w2 // x0<- vA
- .if $chkzero
- cbz $r1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- $preinstr
- $instr // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE $r0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/close_cfi.S b/runtime/interpreter/mterp/arm64/close_cfi.S
deleted file mode 100644
index 7ba0486..0000000
--- a/runtime/interpreter/mterp/arm64/close_cfi.S
+++ /dev/null
@@ -1,4 +0,0 @@
-// Close out the cfi info. We're treating mterp as a single function.
-
-END ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/arm64/const.S b/runtime/interpreter/mterp/arm64/const.S
deleted file mode 100644
index 6f82bbf..0000000
--- a/runtime/interpreter/mterp/arm64/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl $helper // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/control_flow.S b/runtime/interpreter/mterp/arm64/control_flow.S
new file mode 100644
index 0000000..b634c98
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/control_flow.S
@@ -0,0 +1,223 @@
+%def bincmp(condition=""):
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform.
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w0, wINST, #8, #4 // w0<- A
+ GET_VREG w3, w1 // w3<- vB
+ GET_VREG w2, w0 // w2<- vA
+ FETCH_S wINST, 1 // wINST<- branch offset, in code units
+ cmp w2, w3 // compare (vA, vB)
+ b.${condition} MterpCommonTakenBranchNoFlags
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.eq .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
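For reference, an illustrative expansion of this template (derived from the lines above, not additional diff content): with condition="eq", as op_if_eq supplies further down, only the conditional branch differs from the generic body:

    cmp     w2, w3                          // compare (vA, vB)
    b.eq    MterpCommonTakenBranchNoFlags   // ${condition} -> "eq"
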
+%def zcmp(compare="1", branch=""):
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "branch"
+ * fragment that performs the branch; set "compare" to 0 when the fragment
+ * (e.g. cbz/tbz) already tests the value and no explicit compare is needed.
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ lsr w0, wINST, #8 // w0<- AA
+ GET_VREG w2, w0 // w2<- vAA
+ FETCH_S wINST, 1 // w1<- branch offset, in code units
+ .if ${compare}
+ cmp w2, #0 // compare (vAA, 0)
+ .endif
+ ${branch} MterpCommonTakenBranchNoFlags
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.eq .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
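Likewise for zcmp, an illustrative sketch (again derived from the template above): when compare="0" the explicit cmp is dropped and the branch fragment carries both the test and the target, so op_if_eqz (branch="cbz w2,") reduces to

    cbz     w2, MterpCommonTakenBranchNoFlags   // test-and-branch in one instruction

while op_if_gtz keeps the default compare="1" and emits

    cmp     w2, #0
    b.gt    MterpCommonTakenBranchNoFlags
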
+%def op_goto():
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ sbfx wINST, wINST, #8, #8 // wINST<- ssssssAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S wINST, 1 // wINST<- ssssAAAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * the "backward branch" test in the common branch handler must be
+ * "<=0" instead of "<0"; the 32-bit offset is simply assembled into
+ * wINST and handed to MterpCommonTakenBranchNoFlags.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH w0, 1 // w0<- aaaa (lo)
+ FETCH w1, 2 // w1<- AAAA (hi)
+ orr wINST, w0, w1, lsl #16 // wINST<- AAAAaaaa
+ b MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+% bincmp(condition="eq")
+
+%def op_if_eqz():
+% zcmp(compare="0", branch="cbz w2,")
+
+%def op_if_ge():
+% bincmp(condition="ge")
+
+%def op_if_gez():
+% zcmp(compare="0", branch="tbz w2, #31,")
+
+%def op_if_gt():
+% bincmp(condition="gt")
+
+%def op_if_gtz():
+% zcmp(branch="b.gt")
+
+%def op_if_le():
+% bincmp(condition="le")
+
+%def op_if_lez():
+% zcmp(branch="b.le")
+
+%def op_if_lt():
+% bincmp(condition="lt")
+
+%def op_if_ltz():
+% zcmp(compare="0", branch="tbnz w2, #31,")
+
+%def op_if_ne():
+% bincmp(condition="ne")
+
+%def op_if_nez():
+% zcmp(compare="0", branch="cbnz w2,")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH w0, 1 // x0<- 000000000000bbbb (lo)
+ FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
+ lsr w3, wINST, #8 // w3<- AA
+ orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
+ GET_VREG w1, w3 // w1<- vAA
+ add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
+ bl $func // w0<- code-unit branch offset
+ sxtw xINST, w0
+ b MterpCommonTakenBranchNoFlags
+
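A quick worked example of the offset arithmetic above (illustrative values only, derived from the instructions shown):

    // FETCH w0, 1 reads bbbb = 0x0004; FETCH_S x1, 2 reads BBBB = 0x0001
    // orr x0, x0, x1, lsl #16   ->  x0 = 0x00010004 signed code units
    // add x0, xPC, x0, lsl #1   ->  payload address = xPC + 0x00020008 bytes
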
+%def op_return():
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
+
+%def op_return_object():
+% op_return()
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ mov x0, #0
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
+
+%def op_return_void_no_barrier():
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ mov x0, #0
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
+
+%def op_return_wide():
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ bl MterpThreadFenceForConstructor
+ ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
+ mov x0, xSELF
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne .L${opcode}_check
+.L${opcode}_return:
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x0, w2 // x0<- vAA
+ b MterpReturn
+.L${opcode}_check:
+ bl MterpSuspendCheck // (self)
+ b .L${opcode}_return
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w1, w2 // w1<- vAA (exception object)
+ cbz w1, common_errNullObject
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S
deleted file mode 100644
index cf38a29..0000000
--- a/runtime/interpreter/mterp/arm64/entry.S
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- .text
-
-/*
- * Interpreter entry point.
- * On entry:
- * x0 Thread* self/
- * x1 insns_
- * x2 ShadowFrame
- * x3 JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
- SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
- SAVE_TWO_REGS xIBASE, xREFS, 16
- SAVE_TWO_REGS xSELF, xINST, 32
- SAVE_TWO_REGS xPC, xFP, 48
- SAVE_TWO_REGS fp, lr, 64
- add fp, sp, #64
-
- /* Remember the return register */
- str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov xSELF, x0
- ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
- add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
- ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- bl MterpSetUpHotnessCountdown
- mov wPROFILE, w0 // Starting hotness countdown to xPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST // load wINST from rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/arm64/fallback.S b/runtime/interpreter/mterp/arm64/fallback.S
deleted file mode 100644
index 44e7e12..0000000
--- a/runtime/interpreter/mterp/arm64/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
- b MterpFallback
-
diff --git a/runtime/interpreter/mterp/arm64/fbinop.S b/runtime/interpreter/mterp/arm64/fbinop.S
deleted file mode 100644
index 926d078..0000000
--- a/runtime/interpreter/mterp/arm64/fbinop.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {}
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // r0<- CCBB
- lsr w1, w0, #8 // r2<- CC
- and w0, w0, #255 // r1<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- $instr // s0<- op
- lsr w1, wINST, #8 // r1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fbinop2addr.S b/runtime/interpreter/mterp/arm64/fbinop2addr.S
deleted file mode 100644
index 04236ad..0000000
--- a/runtime/interpreter/mterp/arm64/fbinop2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- $instr // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/fcmp.S b/runtime/interpreter/mterp/arm64/fcmp.S
deleted file mode 100644
index cad6318..0000000
--- a/runtime/interpreter/mterp/arm64/fcmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"", "r1":"s1", "r2":"s2", "cond":"lt"}
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG$wide $r1, w2
- GET_VREG$wide $r2, w3
- fcmp $r1, $r2
- cset w0, ne
- cneg w0, w0, $cond
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/floating_point.S b/runtime/interpreter/mterp/arm64/floating_point.S
new file mode 100644
index 0000000..04ca694
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/floating_point.S
@@ -0,0 +1,269 @@
+%def fbinop(instr=""):
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ * form: <op> s0, s0, s1
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w1, w0, #8 // w1<- CC
+ and w0, w0, #255 // w0<- BB
+ GET_VREG s1, w1
+ GET_VREG s0, w0
+ $instr // s0<- op
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
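As an illustrative instantiation (parameters taken from the op_* definitions later in this file, not additional diff content), op_add_float passes instr="fadd s0, s0, s1", so the $instr line above becomes

    fadd    s0, s0, s1                  // s0<- vBB + vCC

and the result is written back from s0 by the SET_VREG that follows.
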
+%def fbinop2addr(instr=""):
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ $instr // s2<- op
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+%def fcmp(wide="", r1="s1", r2="s2", cond="lt"):
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ and w2, w0, #255 // w2<- BB
+ lsr w3, w0, #8 // w3<- CC
+ GET_VREG$wide $r1, w2
+ GET_VREG$wide $r2, w3
+ fcmp $r1, $r2
+ cset w0, ne
+ cneg w0, w0, $cond
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG w0, w4 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
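For readers unfamiliar with the cset/cneg idiom, the result mapping works out as below (derived from the AArch64 fcmp flag encodings; an explanatory sketch, not diff content). cset w0, ne yields 1 whenever the operands compare not-equal, and cneg negates that 1 when $cond holds:

    //                     cmpl-* ($cond = lt)    cmpg-* ($cond = cc)
    // $r1 <  $r2          -1                     -1
    // $r1 == $r2           0                      0
    // $r1 >  $r2          +1                     +1
    // unordered (NaN)     -1 (lt holds)          +1 (cc fails)

This is why the cmpg variants pass cond="cc" while the cmpl variants pass cond="lt".
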
+%def funopNarrow(srcreg="s0", tgtreg="d0", instr=""):
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: int-to-float, float-to-int
+ * TODO: refactor all of the conversions - parameterize width and use same template.
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def funopNarrower(srcreg="s0", tgtreg="d0", instr=""):
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: double-to-float, double-to-int, long-to-float
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def funopWide(srcreg="s0", tgtreg="d0", instr=""):
+ /*
+ * Generic 64bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG_WIDE $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def funopWider(srcreg="s0", tgtreg="d0", instr=""):
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
+ *
+ * For: int-to-double, float-to-double, float-to-long
+ */
+ /* unop vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w4, wINST, #8, #4 // w4<- A
+ GET_VREG $srcreg, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ $instr // d0<- op
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+ GOTO_OPCODE ip // jump to next instruction
+
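As one illustrative instantiation of these unary-conversion templates (parameters taken from op_double_to_int below, not additional diff content), funopNarrower with srcreg="d0", tgtreg="w0", instr="fcvtzs w0, d0" expands to roughly:

    lsr     w3, wINST, #12              // w3<- B
    ubfx    w4, wINST, #8, #4           // w4<- A
    GET_VREG_WIDE d0, w3                // d0<- vB (64-bit double)
    FETCH_ADVANCE_INST 1                // advance rPC, load wINST
    fcvtzs  w0, d0                      // w0<- (int32_t) d0; NaN -> 0, out-of-range saturates
    GET_INST_OPCODE ip                  // extract opcode from wINST
    SET_VREG w0, w4                     // vA<- w0 (32-bit result)
    GOTO_OPCODE ip                      // jump to next instruction

fcvtzs already gives the Java d2i behaviour (NaN to 0, saturation at the int range), which is why no runtime call is needed for this conversion.
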
+%def op_add_double():
+% binopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_add_double_2addr():
+% binopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1")
+
+%def op_add_float():
+% fbinop(instr="fadd s0, s0, s1")
+
+%def op_add_float_2addr():
+% fbinop2addr(instr="fadd s2, s0, s1")
+
+%def op_cmpg_double():
+% fcmp(wide="_WIDE", r1="d1", r2="d2", cond="cc")
+
+%def op_cmpg_float():
+% fcmp(wide="", r1="s1", r2="s2", cond="cc")
+
+%def op_cmpl_double():
+% fcmp(wide="_WIDE", r1="d1", r2="d2", cond="lt")
+
+%def op_cmpl_float():
+% fcmp(wide="", r1="s1", r2="s2", cond="lt")
+
+%def op_div_double():
+% binopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_div_double_2addr():
+% binopWide2addr(instr="fdiv d0, d0, d1", r0="d0", r1="d1")
+
+%def op_div_float():
+% fbinop(instr="fdiv s0, s0, s1")
+
+%def op_div_float_2addr():
+% fbinop2addr(instr="fdiv s2, s0, s1")
+
+%def op_double_to_float():
+% funopNarrower(instr="fcvt s0, d0", srcreg="d0", tgtreg="s0")
+
+%def op_double_to_int():
+% funopNarrower(instr="fcvtzs w0, d0", srcreg="d0", tgtreg="w0")
+
+%def op_double_to_long():
+% funopWide(instr="fcvtzs x0, d0", srcreg="d0", tgtreg="x0")
+
+%def op_float_to_double():
+% funopWider(instr="fcvt d0, s0", srcreg="s0", tgtreg="d0")
+
+%def op_float_to_int():
+% funopNarrow(instr="fcvtzs w0, s0", srcreg="s0", tgtreg="w0")
+
+%def op_float_to_long():
+% funopWider(instr="fcvtzs x0, s0", srcreg="s0", tgtreg="x0")
+
+%def op_int_to_double():
+% funopWider(instr="scvtf d0, w0", srcreg="w0", tgtreg="d0")
+
+%def op_int_to_float():
+% funopNarrow(instr="scvtf s0, w0", srcreg="w0", tgtreg="s0")
+
+%def op_long_to_double():
+% funopWide(instr="scvtf d0, x0", srcreg="x0", tgtreg="d0")
+
+%def op_long_to_float():
+% funopNarrower(instr="scvtf s0, x0", srcreg="x0", tgtreg="s0")
+
+%def op_mul_double():
+% binopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_mul_double_2addr():
+% binopWide2addr(instr="fmul d0, d0, d1", r0="d0", r1="d1")
+
+%def op_mul_float():
+% fbinop(instr="fmul s0, s0, s1")
+
+%def op_mul_float_2addr():
+% fbinop2addr(instr="fmul s2, s0, s1")
+
+%def op_neg_double():
+% unopWide(instr="eor x0, x0, #0x8000000000000000")
+
+%def op_neg_float():
+% unop(instr="eor w0, w0, #0x80000000")
+
+%def op_rem_double():
+ /* rem vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_WIDE d1, w2 // d1<- vCC
+ GET_VREG_WIDE d0, w1 // d0<- vBB
+ bl fmod
+ lsr w4, wINST, #8 // w4<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w4 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 11-14 instructions */
+
+%def op_rem_double_2addr():
+ /* rem vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE d1, w1 // d1<- vB
+ GET_VREG_WIDE d0, w2 // d0<- vA
+ bl fmod
+ ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE d0, w2 // vAA<- result
+ GOTO_OPCODE ip // jump to next instruction
+ /* 10-13 instructions */
+
+%def op_rem_float():
+/* EABI doesn't define a float remainder function, but libm does */
+% fbinop(instr="bl fmodf")
+
+%def op_rem_float_2addr():
+ /* rem vA, vB */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w9, wINST, #8, #4 // w9<- A
+ GET_VREG s1, w3
+ GET_VREG s0, w9
+ bl fmodf
+ ubfx w9, wINST, #8, #4 // w9<- A
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG s0, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_sub_double():
+% binopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
+
+%def op_sub_double_2addr():
+% binopWide2addr(instr="fsub d0, d0, d1", r0="d0", r1="d1")
+
+%def op_sub_float():
+% fbinop(instr="fsub s0, s0, s1")
+
+%def op_sub_float_2addr():
+% fbinop2addr(instr="fsub s2, s0, s1")
diff --git a/runtime/interpreter/mterp/arm64/footer.S b/runtime/interpreter/mterp/arm64/footer.S
deleted file mode 100644
index 0ce3543..0000000
--- a/runtime/interpreter/mterp/arm64/footer.S
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
- cbz x0, MterpFallback // If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException // (self, shadow_frame)
- cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr w1, [xFP, #OFF_FP_DEX_PC]
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * wINST <= signed offset
- * wPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp wINST, #0
- b.gt .L_forward_branch // don't add forward branches to hotness
- tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
- subs wPROFILE, wPROFILE, #1 // countdown
- b.eq .L_add_batch // counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- REFRESH_IBASE
- ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L_suspend_request_pending
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback
- REFRESH_IBASE // might have changed during suspend
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_no_count_backwards:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.ne .L_resume_backward_branch
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_osr_forward
-.L_resume_forward_branch:
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_check_osr_forward:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add x1, xFP, #OFF_FP_SHADOWFRAME
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr x0, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/*
- * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne check1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-check1:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- sxtw x2, wINST
- bl MterpLogOSR
-#endif
- mov x0, #1 // Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov x0, #0 // signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* xFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov x0, #1 // signal return to caller.
- b MterpDone
-MterpReturn:
- ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
- str x0, [x2]
- mov x0, #1 // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp wPROFILE, #0
- bgt MterpProfileActive // if > 0, we may have some counts to report.
- .cfi_remember_state
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
- .cfi_restore_state // Reset unwind info so following code unwinds.
- .cfi_def_cfa_offset 80 // workaround for clang bug: 31975598
-
-MterpProfileActive:
- mov xINST, x0 // stash return value
- /* Report cached hotness counts */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov x0, xINST // restore return value
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
-
diff --git a/runtime/interpreter/mterp/arm64/funopNarrow.S b/runtime/interpreter/mterp/arm64/funopNarrow.S
deleted file mode 100644
index aed830b..0000000
--- a/runtime/interpreter/mterp/arm64/funopNarrow.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
- *
- * For: int-to-float, float-to-int
- * TODO: refactor all of the conversions - parameterize width and use same template.
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG $srcreg, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $instr // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopNarrower.S b/runtime/interpreter/mterp/arm64/funopNarrower.S
deleted file mode 100644
index 6fddfea..0000000
--- a/runtime/interpreter/mterp/arm64/funopNarrower.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE $srcreg, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $instr // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWide.S b/runtime/interpreter/mterp/arm64/funopWide.S
deleted file mode 100644
index 409e26b..0000000
--- a/runtime/interpreter/mterp/arm64/funopWide.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
- /*
- * Generic 64bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
- *
- * For: long-to-double, double-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE $srcreg, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $instr // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE $tgtreg, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/funopWider.S b/runtime/interpreter/mterp/arm64/funopWider.S
deleted file mode 100644
index 4c91ebc..0000000
--- a/runtime/interpreter/mterp/arm64/funopWider.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"srcreg":"s0", "tgtreg":"d0"}
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "$tgtreg = op $srcreg".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG $srcreg, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $instr // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE $tgtreg, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S
deleted file mode 100644
index 0722804..0000000
--- a/runtime/interpreter/mterp/arm64/header.S
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via xFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
- r0 : w0 is 32-bit return register and x0 is 64-bit.
- r0-r7 : Argument registers.
- r8-r15 : Caller save registers (used as temporary registers).
- r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
- the linker, by the trampolines and other stubs (the backend uses
- these as temporary registers).
- r18 : Caller save register (used as temporary register).
- r19 : Pointer to thread-local storage.
- r20-r29: Callee save registers.
- r30 : (lr) is reserved (the link register).
- rsp : (sp) is reserved (the stack pointer).
- rzr : (zr) is reserved (the zero register).
-
- Floating-point registers
- v0-v31
-
- v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
- This is analogous to the C/C++ (hard-float) calling convention.
- v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
- Also used as temporary and codegen scratch registers.
-
- v0-v7 and v16-v31 : trashed across C calls.
- v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
- v16-v31: Used as codegen temp/scratch.
- v8-v15 : Can be used for promotion.
-
- Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- x20 xPC interpreted program counter, used for fetching instructions
- x21 xFP interpreted frame pointer, used for accessing locals and args
- x22 xSELF self (Thread) pointer
- x23 xINST first 16-bit code unit of current instruction
- x24 xIBASE interpreted instruction base pointer, used for computed goto
- x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- x26 wPROFILE jit profile hotness countdown
- x16 ip scratch reg
- x17 ip2 scratch reg (used by macros)
-
-Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC x20
-#define CFI_DEX 20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define xFP x21
-#define xSELF x22
-#define xINST x23
-#define wINST w23
-#define xIBASE x24
-#define xREFS x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip x16
-#define ip2 x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-/*
- * Fetch the next instruction from xPC into wINST. Does not advance xPC.
- */
-.macro FETCH_INST
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances xPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
- * xINST ahead of possible exception point. Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
- add xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- add xPC, xPC, \reg, sxtw
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [xPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
- add \reg, xIBASE, \reg, lsl #${handler_size_bits}
- br \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add \reg, \base, \reg, lsl #${handler_size_bits}
- br \reg
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [xFP, \vreg, uxtw #2]
- str wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [xFP, \vreg, uxtw #2]
- str \reg, [xREFS, \vreg, uxtw #2]
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
- */
-.macro GET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- ldr \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- str \reg, [ip2]
- add ip2, xREFS, \vreg, lsl #2
- str xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
- ldrsw \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
- stp \reg1, \reg2, [sp, #(\offset)]
- .cfi_rel_offset \reg1, (\offset)
- .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
- ldp \reg1, \reg2, [sp, #(\offset)]
- .cfi_restore \reg1
- .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
- stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
- .cfi_adjust_cfa_offset (\frame_adjustment)
- .cfi_rel_offset \reg1, 0
- .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
- ldp \reg1, \reg2, [sp], #(\frame_adjustment)
- .cfi_restore \reg1
- .cfi_restore \reg2
- .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
diff --git a/runtime/interpreter/mterp/arm64/instruction_end.S b/runtime/interpreter/mterp/arm64/instruction_end.S
deleted file mode 100644
index f90ebd0..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_alt.S b/runtime/interpreter/mterp/arm64/instruction_end_alt.S
deleted file mode 100644
index 0b66dbb..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_end_sister.S b/runtime/interpreter/mterp/arm64/instruction_end_sister.S
deleted file mode 100644
index 71c0300..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .type artMterpAsmSisterEnd, #object
- .hidden artMterpAsmSisterEnd
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/arm64/instruction_start.S b/runtime/interpreter/mterp/arm64/instruction_start.S
deleted file mode 100644
index b7e9cf5..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_alt.S b/runtime/interpreter/mterp/arm64/instruction_start_alt.S
deleted file mode 100644
index 7a67ba0..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
diff --git a/runtime/interpreter/mterp/arm64/instruction_start_sister.S b/runtime/interpreter/mterp/arm64/instruction_start_sister.S
deleted file mode 100644
index 0036061..0000000
--- a/runtime/interpreter/mterp/arm64/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
- .type artMterpAsmSisterStart, #object
- .hidden artMterpAsmSisterStart
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/arm64/invoke.S b/runtime/interpreter/mterp/arm64/invoke.S
index 7a32df7..03ac316 100644
--- a/runtime/interpreter/mterp/arm64/invoke.S
+++ b/runtime/interpreter/mterp/arm64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -18,3 +18,93 @@
GET_INST_OPCODE ip
GOTO_OPCODE ip
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ mov x3, xINST
+ bl $helper
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 4
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+
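As with the other wrappers in this file, the concrete handlers below only substitute the helper name; for example op_invoke_polymorphic changes just these lines (illustrative expansion, not additional diff content):

    .extern MterpInvokePolymorphic
    ...
    bl      MterpInvokePolymorphic      // $helper -> MterpInvokePolymorphic
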
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/arm64/invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
deleted file mode 100644
index 7906f0a..0000000
--- a/runtime/interpreter/mterp/arm64/invoke_polymorphic.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl $helper
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
new file mode 100644
index 0000000..1b72e79
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -0,0 +1,784 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate the assembly code by implementing an ExecuteXXXImpl() style body
+ (doesn't handle invoke; allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate the shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via xFP &
+ number_of_vregs_.
+
+ */
+
+/*
+ARM64 Runtime register usage conventions.
+
+ r0 : w0 is 32-bit return register and x0 is 64-bit.
+ r0-r7 : Argument registers.
+ r8-r15 : Caller save registers (used as temporary registers).
+ r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
+ the linker, by the trampolines and other stubs (the backend uses
+ these as temporary registers).
+ r18 : Caller save register (used as temporary register).
+ r19 : Pointer to thread-local storage.
+ r20-r29: Callee save registers.
+ r30 : (lr) is reserved (the link register).
+ rsp : (sp) is reserved (the stack pointer).
+ rzr : (zr) is reserved (the zero register).
+
+ Floating-point registers
+ v0-v31
+
+ v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
+ This is analogous to the C/C++ (hard-float) calling convention.
+ v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
+ Also used as temporary and codegen scratch registers.
+
+ v0-v7 and v16-v31 : trashed across C calls.
+ v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
+
+ v16-v31: Used as codegen temp/scratch.
+ v8-v15 : Can be used for promotion.
+
+ Must maintain 16-byte stack alignment.
+
+Mterp notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ x20 xPC interpreted program counter, used for fetching instructions
+ x21 xFP interpreted frame pointer, used for accessing locals and args
+ x22 xSELF self (Thread) pointer
+ x23 xINST first 16-bit code unit of current instruction
+ x24 xIBASE interpreted instruction base pointer, used for computed goto
+ x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+ x26 wPROFILE jit profile hotness countdown
+ x16 ip scratch reg
+ x17 ip2 scratch reg (used by macros)
+
+Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
+codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/* During bringup, we'll use the shadow frame model instead of xFP */
+/* single-purpose registers, given names for clarity */
+#define xPC x20
+#define CFI_DEX 20 // DWARF register number of the register holding dex-pc (xPC).
+#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
+#define xFP x21
+#define xSELF x22
+#define xINST x23
+#define wINST w23
+#define xIBASE x24
+#define xREFS x25
+#define wPROFILE w26
+#define xPROFILE x26
+#define ip x16
+#define ip2 x17
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
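
Note: the OFF_FP() macros above are plain pointer arithmetic relative to the vregs array. A minimal C++ sketch, using a hypothetical frame layout (not ART's real ShadowFrame), of how a register that points at the vregs can reach sibling fields through a negative offset:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical layout, for illustration only.
    struct FakeShadowFrame {
      uint32_t number_of_vregs;
      uint32_t dex_pc;
      uint32_t vregs[8];   // "xFP" points at this array, not at the struct start.
    };

    int main() {
      FakeShadowFrame frame{8, 3, {}};
      uint8_t* fp = reinterpret_cast<uint8_t*>(frame.vregs);
      // OFF_FP(a) == a - SHADOWFRAME_VREGS_OFFSET: a (negative) delta from fp.
      ptrdiff_t off_dex_pc = static_cast<ptrdiff_t>(offsetof(FakeShadowFrame, dex_pc)) -
                             static_cast<ptrdiff_t>(offsetof(FakeShadowFrame, vregs));
      uint32_t dex_pc = *reinterpret_cast<uint32_t*>(fp + off_dex_pc);
      assert(dex_pc == 3);
      return 0;
    }
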
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
+.endm
+
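
The exported value is a raw pointer into the mapped code units; turning it back into a conventional dex pc is just a pointer subtraction. A hedged sketch in plain C++ (hypothetical names, not the runtime's actual GetDexPC):

    #include <cstdint>
    #include <iostream>

    int main() {
      uint16_t insns[16] = {};                  // stand-in for the code item's insns_
      const uint16_t* exported_pc = &insns[5];  // what EXPORT_PC would have stored
      // Dex pc is an offset in 16-bit code units from the start of the code item.
      uint32_t dex_pc = static_cast<uint32_t>(exported_pc - insns);
      std::cout << "dex pc = " << dex_pc << "\n";   // prints 5
      return 0;
    }
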
+/*
+ * Fetch the next instruction from xPC into wINST. Does not advance xPC.
+ */
+.macro FETCH_INST
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances xPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ldrh wINST, [xPC, #((\count)*2)]!
+.endm
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to xPC and xINST).
+ */
+.macro PREFETCH_ADVANCE_INST dreg, sreg, count
+ ldrh \dreg, [\sreg, #((\count)*2)]!
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
+ * xINST ahead of possible exception point. Be sure to manually advance xPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ ldrh wINST, [xPC, #((\count)*2)]
+.endm
+
+/* Advance xPC by some number of code units. */
+.macro ADVANCE count
+ add xPC, xPC, #((\count)*2)
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg and advance xPC
+ * to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
+ *
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ add xPC, xPC, \reg, sxtw
+ ldrh wINST, [xPC]
+.endm
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance xPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+.macro FETCH reg, count
+ ldrh \reg, [xPC, #((\count)*2)]
+.endm
+
+.macro FETCH_S reg, count
+ ldrsh \reg, [xPC, #((\count)*2)]
+.endm
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+.macro FETCH_B reg, count, byte
+ ldrb \reg, [xPC, #((\count)*2+(\byte))]
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, xINST, #255
+.endm
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+.macro GET_PREFETCHED_OPCODE oreg, ireg
+ and \oreg, \ireg, #255
+.endm
+
+/*
+ * Begin executing the opcode in _reg. Clobbers _reg.
+ */
+
+.macro GOTO_OPCODE reg
+ add \reg, xIBASE, \reg, lsl #${handler_size_bits}
+ br \reg
+.endm
+.macro GOTO_OPCODE_BASE base,reg
+ add \reg, \base, \reg, lsl #${handler_size_bits}
+ br \reg
+.endm
+
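
GOTO_OPCODE is a computed goto: every opcode handler occupies a fixed-size slot, so the target address is base + (opcode << handler_size_bits). A small C++ sketch of the same dispatch idea, with function pointers standing in for the contiguous assembly handler stubs (an assumption for illustration):

    #include <cstdint>
    #include <iostream>

    using Handler = void (*)();

    void HandleNop()  { std::cout << "nop\n"; }
    void HandleMove() { std::cout << "move\n"; }

    int main() {
      // In the assembly the handlers are laid out contiguously and a slot is
      // 1 << handler_size_bits bytes; here a table of function pointers plays that role.
      Handler handlers[256] = {};
      handlers[0x00] = HandleNop;
      handlers[0x01] = HandleMove;

      uint16_t inst = 0x2301;          // low byte is the opcode (GET_INST_OPCODE)
      uint8_t opcode = inst & 0xff;
      handlers[opcode]();              // GOTO_OPCODE: indexed indirect branch
      return 0;
    }
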
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+.macro GET_VREG reg, vreg
+ ldr \reg, [xFP, \vreg, uxtw #2]
+.endm
+.macro SET_VREG reg, vreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str wzr, [xREFS, \vreg, uxtw #2]
+.endm
+.macro SET_VREG_OBJECT reg, vreg, tmpreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str \reg, [xREFS, \vreg, uxtw #2]
+.endm
+
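
A short sketch of the double-store scheme the SET_VREG* macros above implement: a primitive write clears the parallel reference slot, an object write mirrors the value into it. The container is hypothetical, not ART's ShadowFrame:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Vregs {
      std::vector<uint32_t> values;  // what xFP indexes
      std::vector<uint32_t> refs;    // what xREFS indexes
      explicit Vregs(size_t n) : values(n, 0), refs(n, 0) {}
      // SET_VREG: a primitive write must clear the reference slot so the GC
      // no longer treats the old contents as a live reference.
      void SetVReg(size_t i, uint32_t v) { values[i] = v; refs[i] = 0; }
      // SET_VREG_OBJECT: an object write stores the reference in both arrays.
      void SetVRegObject(size_t i, uint32_t ref) { values[i] = ref; refs[i] = ref; }
    };

    int main() {
      Vregs v(4);
      v.SetVRegObject(1, 0xdeadbeef);
      v.SetVReg(1, 42);               // overwriting with a primitive drops the ref
      assert(v.values[1] == 42 && v.refs[1] == 0);
      return 0;
    }
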
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * TUNING: can we do better here?
+ */
+.macro GET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ ldr \reg, [ip2]
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ str \reg, [ip2]
+ add ip2, xREFS, \vreg, lsl #2
+ str xzr, [ip2]
+.endm
+
+/*
+ * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
+ * Used to avoid an extra instruction in int-to-long.
+ */
+.macro GET_VREG_S reg, vreg
+ ldrsw \reg, [xFP, \vreg, uxtw #2]
+.endm
+
+/*
+ * Convert a virtual register index into an address.
+ */
+.macro VREG_INDEX_TO_ADDR reg, vreg
+ add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+.endm
+
+/*
+ * Save two registers to the stack.
+ */
+.macro SAVE_TWO_REGS reg1, reg2, offset
+ stp \reg1, \reg2, [sp, #(\offset)]
+ .cfi_rel_offset \reg1, (\offset)
+ .cfi_rel_offset \reg2, (\offset) + 8
+.endm
+
+/*
+ * Restore two registers from the stack.
+ */
+.macro RESTORE_TWO_REGS reg1, reg2, offset
+ ldp \reg1, \reg2, [sp, #(\offset)]
+ .cfi_restore \reg1
+ .cfi_restore \reg2
+.endm
+
+/*
+ * Increase frame size and save two registers to the bottom of the stack.
+ */
+.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
+ stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
+ .cfi_adjust_cfa_offset (\frame_adjustment)
+ .cfi_rel_offset \reg1, 0
+ .cfi_rel_offset \reg2, 8
+.endm
+
+/*
+ * Restore two registers from the bottom of the stack and decrease frame size.
+ */
+.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
+ ldp \reg1, \reg2, [sp], #(\frame_adjustment)
+ .cfi_restore \reg1
+ .cfi_restore \reg2
+ .cfi_adjust_cfa_offset -(\frame_adjustment)
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+ .type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+.endm
+
+.macro END name
+ .size \name, .-\name
+.endm
+
+// Macro to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ neg \rRef, \rRef
+#endif // USE_HEAP_POISONING
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ .text
+
+/*
+ * Interpreter entry point.
+ * On entry:
+ * x0 Thread* self
+ * x1 insns_
+ * x2 ShadowFrame
+ * x3 JValue* result_register
+ *
+ */
+ENTRY ExecuteMterpImpl
+ .cfi_startproc
+ SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
+ SAVE_TWO_REGS xIBASE, xREFS, 16
+ SAVE_TWO_REGS xSELF, xINST, 32
+ SAVE_TWO_REGS xPC, xFP, 48
+ SAVE_TWO_REGS fp, lr, 64
+ add fp, sp, #64
+
+ /* Remember the return register */
+ str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
+
+ /* Remember the dex instruction pointer */
+ str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
+
+ /* set up "named" registers */
+ mov xSELF, x0
+ ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
+ add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
+ add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
+ ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
+ add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+ EXPORT_PC
+
+ /* Starting ibase */
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+
+ /* Set up for backwards branches & osr profiling */
+ ldr x0, [xFP, #OFF_FP_METHOD]
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xSELF
+ bl MterpSetUpHotnessCountdown
+ mov wPROFILE, w0 // Starting hotness countdown to xPROFILE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST // load wINST from rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+ /* NOTE: no fallthrough */
+ // cfi info continues, and covers the whole mterp implementation.
+ END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+ // Call C++ to do debug checks and return to the handler using tail call.
+ .extern MterpCheckBefore
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xPC
+ b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ bl Mterp_dchecks_before_helper
+ #endif
+
+%def footer():
+ .cfi_endproc
+ END MterpHelpers
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+
+%def helpers():
+ ENTRY MterpHelpers
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNegativeArraySizeException
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNoSuchMethodException
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogExceptionThrownException
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
+ bl MterpLogSuspendFallback
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ cbz x0, MterpFallback // If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpHandleException // (self, shadow_frame)
+ cbz w0, MterpExceptionReturn // no local catch, back to caller.
+ ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
+ ldr w1, [xFP, #OFF_FP_DEX_PC]
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
+ add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ bl MterpShouldSwitchInterpreters
+ cbnz w0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC
+ FETCH_INST
+ GET_INST_OPCODE ip
+ GOTO_OPCODE ip
+ /* NOTE: no fallthrough */
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * wINST <= signed offset
+ * wPROFILE <= signed hotness countdown (expanded to 32 bits)
+ * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+ cmp wINST, #0
+ b.gt .L_forward_branch // don't add forward branches to hotness
+ tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
+ subs wPROFILE, wPROFILE, #1 // countdown
+ b.eq .L_add_batch // counted down to zero - report
+.L_resume_backward_branch:
+ ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
+ add w2, wINST, wINST // w2<- byte offset
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ REFRESH_IBASE
+ ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne .L_suspend_request_pending
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ cbnz x0, MterpFallback
+ REFRESH_IBASE // might have changed during suspend
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_no_count_backwards:
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.ne .L_resume_backward_branch
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ b .L_resume_backward_branch
+
+.L_forward_branch:
+ cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
+ b.eq .L_check_osr_forward
+.L_resume_forward_branch:
+ add w2, wINST, wINST // w2<- byte offset
+ FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+.L_check_osr_forward:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xINST
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ b .L_resume_forward_branch
+
+.L_add_batch:
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ ldr x0, [xFP, #OFF_FP_METHOD]
+ mov x2, xSELF
+ bl MterpAddHotnessBatch // (method, shadow_frame, self)
+ mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
+ b .L_no_count_backwards
+
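
A rough C++ restatement of the taken-branch policy described above; the sentinel value and the refreshed countdown are assumptions, and the real work is done by MterpAddHotnessBatch and MterpMaybeDoOnStackReplacement:

    #include <cstdint>
    #include <iostream>

    constexpr int32_t kJitCheckOsr = -1;   // assumed sentinel, for illustration only

    int32_t OnTakenBranch(int32_t offset_in_code_units, int32_t countdown) {
      if (offset_in_code_units > 0) {
        return countdown;                  // forward branches are never counted
      }
      if (countdown < 0) {                 // profiling disabled or OSR-check mode
        if (countdown == kJitCheckOsr) {
          std::cout << "check for a compiled loop header / possible OSR\n";
        }
        return countdown;
      }
      if (--countdown == 0) {              // .L_add_batch: report and get a new value
        std::cout << "report hotness batch\n";
        countdown = 128;                   // assumed refreshed countdown
      }
      return countdown;
    }

    int main() {
      int32_t c = 2;
      c = OnTakenBranch(-4, c);            // backward branch: counts down
      c = OnTakenBranch(-4, c);            // hits zero: reports, refreshes
      c = OnTakenBranch(+4, c);            // forward branch: untouched
      std::cout << "countdown = " << c << "\n";
      return 0;
    }
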
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, #2
+ EXPORT_PC
+ bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
+ cbnz x0, MterpOnStackReplacement
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/*
+ * Check for a pending suspend request. Assumes wINST is already loaded, xPC has been
+ * advanced, the opcode still needs to be fetched and branched to, and flags are in lr.
+ */
+MterpCheckSuspendAndContinue:
+ ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
+ ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ b.ne check1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+check1:
+ EXPORT_PC
+ mov x0, xSELF
+ bl MterpSuspendCheck // (self)
+ cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ sxtw x2, wINST
+ bl MterpLogOSR
+#endif
+ mov x0, #1 // Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ mov x0, xSELF
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ bl MterpLogFallback
+#endif
+MterpCommonFallback:
+ mov x0, #0 // signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * uint32_t* xFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ mov x0, #1 // signal return to caller.
+ b MterpDone
+MterpReturn:
+ ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
+ str x0, [x2]
+ mov x0, #1 // signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ cmp wPROFILE, #0
+ bgt MterpProfileActive // if > 0, we may have some counts to report.
+ .cfi_remember_state
+ RESTORE_TWO_REGS fp, lr, 64
+ RESTORE_TWO_REGS xPC, xFP, 48
+ RESTORE_TWO_REGS xSELF, xINST, 32
+ RESTORE_TWO_REGS xIBASE, xREFS, 16
+ RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
+ ret
+ .cfi_restore_state // Reset unwind info so following code unwinds.
+ .cfi_def_cfa_offset 80 // workaround for clang bug: 31975598
+
+MterpProfileActive:
+ mov xINST, x0 // stash return value
+ /* Report cached hotness counts */
+ ldr x0, [xFP, #OFF_FP_METHOD]
+ add x1, xFP, #OFF_FP_SHADOWFRAME
+ mov x2, xSELF
+ strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
+ bl MterpAddHotnessBatch // (method, shadow_frame, self)
+ mov x0, xINST // restore return value
+ RESTORE_TWO_REGS fp, lr, 64
+ RESTORE_TWO_REGS xPC, xFP, 48
+ RESTORE_TWO_REGS xSELF, xINST, 32
+ RESTORE_TWO_REGS xIBASE, xREFS, 16
+ RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
+ ret
+
+
+%def instruction_end():
+
+ .type artMterpAsmInstructionEnd, #object
+ .hidden artMterpAsmInstructionEnd
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_start():
+
+ .type artMterpAsmInstructionStart, #object
+ .hidden artMterpAsmInstructionStart
+ .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+%def opcode_start():
+ ENTRY Mterp_${opcode}
+%def opcode_end():
+ END Mterp_${opcode}
+%def helper_start(name):
+ ENTRY ${name}
+%def helper_end(name):
+ END ${name}
diff --git a/runtime/interpreter/mterp/arm64/object.S b/runtime/interpreter/mterp/arm64/object.S
new file mode 100644
index 0000000..3cc688e
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/object.S
@@ -0,0 +1,308 @@
+%def field(helper=""):
+ /*
+ * General field read / write (iget-* iput-* sget-* sput-*).
+ */
+ .extern $helper
+ mov x0, xPC // arg0: Instruction* inst
+ mov x1, xINST // arg1: uint16_t inst_data
+ add x2, xFP, #OFF_FP_SHADOWFRAME // arg2: ShadowFrame* sf
+ mov x3, xSELF // arg3: Thread* self
+ PREFETCH_INST 2 // prefetch next opcode
+ bl $helper
+ cbz x0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
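
The field() wrapper above just marshals four arguments and tests the result for a pending exception; a hedged sketch of the helper shape it expects (stub types only, not ART's real declarations):

    #include <cstdint>

    struct Instruction;    // stand-ins for the runtime types named in the comments
    struct ShadowFrame;
    struct Thread;

    // Shape implied by the calling sequence: four arguments in x0-x3, and a zero
    // return signals "pending exception, take MterpPossibleException".
    using FieldHelper = bool (*)(Instruction* inst, uint16_t inst_data,
                                 ShadowFrame* sf, Thread* self);

    bool DispatchField(FieldHelper helper, Instruction* inst, uint16_t inst_data,
                       ShadowFrame* sf, Thread* self) {
      if (!helper(inst, inst_data, sf, self)) {
        return false;   // like cbz x0, MterpPossibleException
      }
      return true;      // advance past the 2-code-unit instruction and continue
    }

    int main() { return 0; }   // compile-only sketch
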
+%def op_check_cast():
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
+ mov x3, xSELF // w3<- self
+ bl MterpCheckCast // (index, &obj, method, self)
+ PREFETCH_INST 2
+ cbnz w0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iget(is_object=False, is_wide=False, load="ldr", helper="MterpIGetU32"):
+ // Fast-path which gets the field offset from thread-local cache.
+ add x0, xSELF, #THREAD_INTERPRETER_CACHE_OFFSET // cache address
+ ubfx x1, xPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 // entry index
+ add x0, x0, x1, lsl #4 // entry address within the cache
+ ldp x0, x1, [x0] // entry key (pc) and value (offset)
+ lsr w2, wINST, #12 // B
+ GET_VREG w2, w2 // object we're operating on
+ cmp x0, xPC
+% slow_path_label = add_helper(lambda: field(helper))
+ b.ne ${slow_path_label} // cache miss
+ cbz w2, common_errNullObject // null object
+% if is_wide:
+ ldr x0, [x2, x1] // x0<- obj.field
+% else:
+ ${load} w0, [x2, x1] // w0<- obj.field
+% #endif
+% if is_object:
+ UNPOISON_HEAP_REF w0
+#if defined(USE_READ_BARRIER)
+# if defined(USE_BAKER_READ_BARRIER)
+ ldr w1, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz w1, .L_${opcode}_mark // GC is active.
+.L_${opcode}_marked:
+# else
+ bl artReadBarrierMark // x0 <- artReadBarrierMark(x0)
+# endif
+#endif
+% #endif
+ ubfx w2, wINST, #8, #4 // w2<- A
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+% if is_object:
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+% elif is_wide:
+ SET_VREG_WIDE x0, w2 // fp[A]<- x0
+% else:
+ SET_VREG w0, w2 // fp[A]<- w0
+% #endif
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+% if is_object:
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+.L_${opcode}_mark:
+ bl artReadBarrierMark // x0 <- artReadBarrierMark(x0)
+ b .L_${opcode}_marked
+#endif
+% #endif
+
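
The iget fast path above probes a per-thread, direct-mapped cache keyed by the dex pc; a miss falls through to the field() slow path. A C++ sketch under assumed sizes (the real cache lives at THREAD_INTERPRETER_CACHE_OFFSET and the assembly treats entries as 16-byte key/value pairs):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <optional>

    constexpr size_t kCacheSizeLog2 = 8;              // assumed size, for illustration
    constexpr size_t kCacheSize = size_t{1} << kCacheSizeLog2;

    struct Entry { uintptr_t key; uintptr_t value; }; // 16 bytes: dex pc, field offset

    std::optional<uintptr_t> Probe(const Entry (&cache)[kCacheSize], uintptr_t dex_pc_ptr) {
      size_t index = (dex_pc_ptr >> 2) & (kCacheSize - 1);  // ubfx xPC, #2, #SIZE_LOG2
      const Entry& e = cache[index];
      if (e.key != dex_pc_ptr) {
        return std::nullopt;                                // b.ne: cache miss, slow path
      }
      return e.value;                                       // hit: cached field offset
    }

    int main() {
      static Entry cache[kCacheSize] = {};
      uintptr_t pc = 0x10001230;
      cache[(pc >> 2) & (kCacheSize - 1)] = {pc, 24};       // pretend offset 24 was cached
      if (auto off = Probe(cache, pc)) {
        std::cout << "fast path, field offset " << *off << "\n";
      }
      return 0;
    }
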
+%def op_iget_boolean():
+% op_iget(load="ldrb", helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="ldrb")
+
+%def op_iget_byte():
+% op_iget(load="ldrsb", helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="ldrsb")
+
+%def op_iget_char():
+% op_iget(load="ldrh", helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="ldrh")
+
+%def op_iget_object():
+% op_iget(is_object=True, helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ EXPORT_PC
+ GET_VREG w0, w2 // w0<- object we're operating on
+ bl artIGetObjectFromMterp // (obj, offset)
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz w3, MterpPossibleException // bail out
+ SET_VREG_OBJECT w0, w2 // fp[A]<- w0
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iget_quick(load="ldr", extend=""):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ $load w0, [x3, x1] // w0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $extend
+ SET_VREG w0, w2 // fp[A]<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iget_short():
+% op_iget(load="ldrsh", helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="ldrsh")
+
+%def op_iget_wide():
+% op_iget(is_wide=True, helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w4, 1 // w4<- field byte offset
+ GET_VREG w3, w2 // w3<- object we're operating on
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ ldr x0, [x3, x4] // x0<- obj.field
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ SET_VREG_WIDE x0, w2
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_instance_of():
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- CCCC
+ lsr w1, wINST, #12 // w1<- B
+ VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
+ ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
+ mov x3, xSELF // w3<- self
+ bl MterpInstanceOf // (index, &obj, method, self)
+ ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ ubfx w2, wINST, #8, #4 // w2<- A
+ PREFETCH_INST 2
+ cbnz x1, MterpException
+ ADVANCE 2 // advance rPC
+ SET_VREG w0, w2 // vA<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iput(helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(store="strb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(store="strb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(store="strh")
+
+%def op_iput_object():
+% op_iput(helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xPC
+ mov w2, wINST
+ bl MterpIputObjectQuick
+ cbz w0, MterpException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iput_quick(store="str"):
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w1, 1 // w1<- field byte offset
+ GET_VREG w3, w2 // w3<- fp[B], the object pointer
+ ubfx w2, wINST, #8, #4 // w2<- A
+ cbz w3, common_errNullObject // object was null
+ GET_VREG w0, w2 // w0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $store w0, [x3, x1] // obj.field<- w0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(store="strh")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ lsr w2, wINST, #12 // w2<- B
+ FETCH w3, 1 // w3<- field byte offset
+ GET_VREG w2, w2 // w2<- fp[B], the object pointer
+ ubfx w0, wINST, #8, #4 // w0<- A
+ cbz w2, common_errNullObject // object was null
+ GET_VREG_WIDE x0, w0 // x0<- fp[A]
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ str x0, [x2, x3] // obj.field<- x0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_new_instance():
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ EXPORT_PC
+ add x0, xFP, #OFF_FP_SHADOWFRAME
+ mov x1, xSELF
+ mov w2, wINST
+ bl MterpNewInstance // (shadow_frame, self, inst_data)
+ cbz w0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_sget(helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/arm64/op_add_double.S b/runtime/interpreter/mterp/arm64/op_add_double.S
deleted file mode 100644
index 8509f70..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fadd d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_double_2addr.S b/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
deleted file mode 100644
index 61fd58f..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fadd d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float.S b/runtime/interpreter/mterp/arm64/op_add_float.S
deleted file mode 100644
index 7d09fef..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fadd s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_float_2addr.S b/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
deleted file mode 100644
index 7b378e2..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fadd s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int.S b/runtime/interpreter/mterp/arm64/op_add_int.S
deleted file mode 100644
index 6eadb54..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_2addr.S b/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
deleted file mode 100644
index d35bc8e..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit16.S b/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
deleted file mode 100644
index 4930ad7..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"add w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S b/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
deleted file mode 100644
index 2dfb8b9..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"add w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long.S b/runtime/interpreter/mterp/arm64/op_add_long.S
deleted file mode 100644
index bc334aa..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"add x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_add_long_2addr.S b/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
deleted file mode 100644
index 5e5dbce..0000000
--- a/runtime/interpreter/mterp/arm64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"add x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aget.S b/runtime/interpreter/mterp/arm64/op_aget.S
deleted file mode 100644
index 662c9cc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #$shift // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $load w2, [x0, #$data_offset] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aget_boolean.S b/runtime/interpreter/mterp/arm64/op_aget_boolean.S
deleted file mode 100644
index 6ab6cc1..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_byte.S b/runtime/interpreter/mterp/arm64/op_aget_byte.S
deleted file mode 100644
index c7f5b23..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_char.S b/runtime/interpreter/mterp/arm64/op_aget_char.S
deleted file mode 100644
index 9fddf17..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_object.S b/runtime/interpreter/mterp/arm64/op_aget_object.S
deleted file mode 100644
index 1bbe3e8..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_object.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- FETCH_B w3, 1, 1 // w3<- CC
- EXPORT_PC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- bl artAGetObjectFromMterp // (array, index)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w9<- AA
- PREFETCH_INST 2
- cbnz w1, MterpException
- SET_VREG_OBJECT w0, w2
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aget_short.S b/runtime/interpreter/mterp/arm64/op_aget_short.S
deleted file mode 100644
index 39554de..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aget_wide.S b/runtime/interpreter/mterp/arm64/op_aget_wide.S
deleted file mode 100644
index 6f990ba..0000000
--- a/runtime/interpreter/mterp/arm64/op_aget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // yes, bail
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x2, w4
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_and_int.S b/runtime/interpreter/mterp/arm64/op_and_int.S
deleted file mode 100644
index 31f3f73..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_2addr.S b/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
deleted file mode 100644
index e59632c..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit16.S b/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
deleted file mode 100644
index 6540f81..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"and w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S b/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
deleted file mode 100644
index 495b5cd..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"and w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long.S b/runtime/interpreter/mterp/arm64/op_and_long.S
deleted file mode 100644
index ede047d..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"and x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_and_long_2addr.S b/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
deleted file mode 100644
index d62ccef..0000000
--- a/runtime/interpreter/mterp/arm64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"and x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_aput.S b/runtime/interpreter/mterp/arm64/op_aput.S
deleted file mode 100644
index 175b483..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #$shift // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- $store w2, [x0, #$data_offset] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_boolean.S b/runtime/interpreter/mterp/arm64/op_aput_boolean.S
deleted file mode 100644
index 5e7a86f..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_byte.S b/runtime/interpreter/mterp/arm64/op_aput_byte.S
deleted file mode 100644
index d659ebc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_char.S b/runtime/interpreter/mterp/arm64/op_aput_char.S
deleted file mode 100644
index 7547c80..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_object.S b/runtime/interpreter/mterp/arm64/op_aput_object.S
deleted file mode 100644
index 0146fdc..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpAputObject
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_aput_short.S b/runtime/interpreter/mterp/arm64/op_aput_short.S
deleted file mode 100644
index 8631e28..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/arm64/op_aput_wide.S b/runtime/interpreter/mterp/arm64/op_aput_wide.S
deleted file mode 100644
index e1cf9c1..0000000
--- a/runtime/interpreter/mterp/arm64/op_aput_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- GET_VREG_WIDE x1, w4
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_array_length.S b/runtime/interpreter/mterp/arm64/op_array_length.S
deleted file mode 100644
index 0cce917..0000000
--- a/runtime/interpreter/mterp/arm64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /*
- * Return the length of an array.
- */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w0, w1 // w0<- vB (object ref)
- cbz w0, common_errNullObject // yup, fail
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w3, w2 // vB<- length
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_check_cast.S b/runtime/interpreter/mterp/arm64/op_check_cast.S
deleted file mode 100644
index cb9f606..0000000
--- a/runtime/interpreter/mterp/arm64/op_check_cast.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpCheckCast // (index, &obj, method, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_cmp_long.S b/runtime/interpreter/mterp/arm64/op_cmp_long.S
deleted file mode 100644
index c4ad984..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmp_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE x1, w2
- GET_VREG_WIDE x2, w3
- cmp x1, x2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG w0, w4
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_double.S b/runtime/interpreter/mterp/arm64/op_cmpg_double.S
deleted file mode 100644
index 30cb7eb..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "cond":"cc"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpg_float.S b/runtime/interpreter/mterp/arm64/op_cmpg_float.S
deleted file mode 100644
index ba23f43..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "cond":"cc"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_double.S b/runtime/interpreter/mterp/arm64/op_cmpl_double.S
deleted file mode 100644
index c739685..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"_WIDE", "r1":"d1", "r2":"d2", "cond":"lt"}
diff --git a/runtime/interpreter/mterp/arm64/op_cmpl_float.S b/runtime/interpreter/mterp/arm64/op_cmpl_float.S
deleted file mode 100644
index 32a9319..0000000
--- a/runtime/interpreter/mterp/arm64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fcmp.S" {"wide":"", "r1":"s1", "r2":"s2", "cond":"lt"}
diff --git a/runtime/interpreter/mterp/arm64/op_const.S b/runtime/interpreter/mterp/arm64/op_const.S
deleted file mode 100644
index 031ede1..0000000
--- a/runtime/interpreter/mterp/arm64/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const vAA, #+BBBBbbbb */
- lsr w3, wINST, #8 // w3<- AA
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w1, 2 // w1<- BBBB (high
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w3 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_16.S b/runtime/interpreter/mterp/arm64/op_const_16.S
deleted file mode 100644
index f0e8192..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance xPC, load wINST
- SET_VREG w0, w3 // vAA<- w0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_4.S b/runtime/interpreter/mterp/arm64/op_const_4.S
deleted file mode 100644
index 9a36115..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/4 vA, #+B */
- sbfx w1, wINST, #12, #4 // w1<- sssssssB
- ubfx w0, wINST, #8, #4 // w0<- A
- FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- GET_INST_OPCODE ip // ip<- opcode from xINST
- SET_VREG w1, w0 // fp[A]<- w1
- GOTO_OPCODE ip // execute next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_class.S b/runtime/interpreter/mterp/arm64/op_const_class.S
deleted file mode 100644
index 7228245..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_high16.S b/runtime/interpreter/mterp/arm64/op_const_high16.S
deleted file mode 100644
index 3a9edff..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
- lsr w3, wINST, #8 // r3<- AA
- lsl w0, w0, #16 // r0<- BBBB0000
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- SET_VREG w0, w3 // vAA<- r0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_handle.S b/runtime/interpreter/mterp/arm64/op_const_method_handle.S
deleted file mode 100644
index 0df0fa6..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_method_type.S b/runtime/interpreter/mterp/arm64/op_const_method_type.S
deleted file mode 100644
index 1adfe5a..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_string.S b/runtime/interpreter/mterp/arm64/op_const_string.S
deleted file mode 100644
index 8cf0d6d..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
deleted file mode 100644
index e1a7339..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* const/string vAA, String//BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w2, 2 // w2<- BBBB (high
- lsr w1, wINST, #8 // w1<- AA
- orr w0, w0, w2, lsl #16 // w1<- BBBBbbbb
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 // advance rPC
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 3 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide.S b/runtime/interpreter/mterp/arm64/op_const_wide.S
deleted file mode 100644
index 8f57dda..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH w0, 1 // w0<- bbbb (low)
- FETCH w1, 2 // w1<- BBBB (low middle)
- FETCH w2, 3 // w2<- hhhh (high middle)
- FETCH w3, 4 // w3<- HHHH (high)
- lsr w4, wINST, #8 // r4<- AA
- FETCH_ADVANCE_INST 5 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- orr x0, x0, x2, lsl #32 // x0<- hhhhBBBBbbbb
- orr x0, x0, x3, lsl #48 // x0<- HHHHhhhhBBBBbbbb
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
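For reference, the 64-bit literal that this deleted const-wide handler builds with the orr chain can be sketched in C++ as follows (illustrative only; the function and parameter names are not from the ART sources):

#include <cstdint>

// const-wide vAA, #+HHHHhhhhBBBBbbbb carries four 16-bit code units after the
// opcode word; the handler ORs them together from low to high, mirroring the
// FETCH/orr sequence above, and stores the result into the wide register vAA.
uint64_t AssembleConstWide(uint16_t bbbb, uint16_t BBBB,
                           uint16_t hhhh, uint16_t HHHH) {
  uint64_t value = bbbb;
  value |= static_cast<uint64_t>(BBBB) << 16;
  value |= static_cast<uint64_t>(hhhh) << 32;
  value |= static_cast<uint64_t>(HHHH) << 48;
  return value;
}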
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_16.S b/runtime/interpreter/mterp/arm64/op_const_wide_16.S
deleted file mode 100644
index 553d481..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S x0, 1 // x0<- ssssssssssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_32.S b/runtime/interpreter/mterp/arm64/op_const_wide_32.S
deleted file mode 100644
index 9dc4fc3..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH w0, 1 // x0<- 000000000000bbbb (low)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_S x2, 2 // x2<- ssssssssssssBBBB (high)
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr x0, x0, x2, lsl #16 // x0<- ssssssssBBBBbbbb
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_const_wide_high16.S b/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
deleted file mode 100644
index 94ab987..0000000
--- a/runtime/interpreter/mterp/arm64/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- lsl x0, x0, #48
- SET_VREG_WIDE x0, w1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_div_double.S b/runtime/interpreter/mterp/arm64/op_div_double.S
deleted file mode 100644
index 1f7dad0..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fdiv d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_double_2addr.S b/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
deleted file mode 100644
index 414a175..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fdiv d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float.S b/runtime/interpreter/mterp/arm64/op_div_float.S
deleted file mode 100644
index f24a26c..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fdiv s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_float_2addr.S b/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
deleted file mode 100644
index 2888049..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fdiv s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int.S b/runtime/interpreter/mterp/arm64/op_div_int.S
deleted file mode 100644
index 88371c0..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_2addr.S b/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
deleted file mode 100644
index 5f5a80f..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit16.S b/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
deleted file mode 100644
index dc7a484..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_int_lit8.S b/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
deleted file mode 100644
index c06521c..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"instr":"sdiv w0, w0, w1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long.S b/runtime/interpreter/mterp/arm64/op_div_long.S
deleted file mode 100644
index 820ae3d..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"sdiv x0, x1, x2", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_div_long_2addr.S b/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
deleted file mode 100644
index da7eabd..0000000
--- a/runtime/interpreter/mterp/arm64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"sdiv x0, x0, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_float.S b/runtime/interpreter/mterp/arm64/op_double_to_float.S
deleted file mode 100644
index c1555fd..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"fcvt s0, d0", "srcreg":"d0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_int.S b/runtime/interpreter/mterp/arm64/op_double_to_int.S
deleted file mode 100644
index 7244bac..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"fcvtzs w0, d0", "srcreg":"d0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_double_to_long.S b/runtime/interpreter/mterp/arm64/op_double_to_long.S
deleted file mode 100644
index 741160b..0000000
--- a/runtime/interpreter/mterp/arm64/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWide.S" {"instr":"fcvtzs x0, d0", "srcreg":"d0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_fill_array_data.S b/runtime/interpreter/mterp/arm64/op_fill_array_data.S
deleted file mode 100644
index 86fa6db..0000000
--- a/runtime/interpreter/mterp/arm64/op_fill_array_data.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x1, x0, x1, lsl #16 // x1<- ssssssssBBBBbbbb
- GET_VREG w0, w3 // w0<- vAA (array object)
- add x1, xPC, x1, lsl #1 // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
- bl MterpFillArrayData // (obj, payload)
- cbz w0, MterpPossibleException // exception?
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array.S b/runtime/interpreter/mterp/arm64/op_filled_new_array.S
deleted file mode 100644
index 806a1b1..0000000
--- a/runtime/interpreter/mterp/arm64/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern $helper
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl $helper
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S b/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
deleted file mode 100644
index 3c9a419..0000000
--- a/runtime/interpreter/mterp/arm64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_double.S b/runtime/interpreter/mterp/arm64/op_float_to_double.S
deleted file mode 100644
index 892feca..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"fcvt d0, s0", "srcreg":"s0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_int.S b/runtime/interpreter/mterp/arm64/op_float_to_int.S
deleted file mode 100644
index c849d81..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrow.S" {"instr":"fcvtzs w0, s0", "srcreg":"s0", "tgtreg":"w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_float_to_long.S b/runtime/interpreter/mterp/arm64/op_float_to_long.S
deleted file mode 100644
index c3de16f..0000000
--- a/runtime/interpreter/mterp/arm64/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"fcvtzs x0, s0", "srcreg":"s0", "tgtreg":"x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_goto.S b/runtime/interpreter/mterp/arm64/op_goto.S
deleted file mode 100644
index 6381e94..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx wINST, wINST, #8, #8 // wINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_goto_16.S b/runtime/interpreter/mterp/arm64/op_goto_16.S
deleted file mode 100644
index fb9a80a..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S wINST, 1 // wINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/arm64/op_goto_32.S b/runtime/interpreter/mterp/arm64/op_goto_32.S
deleted file mode 100644
index b13cb41..0000000
--- a/runtime/interpreter/mterp/arm64/op_goto_32.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH w0, 1 // w0<- aaaa (lo)
- FETCH w1, 2 // w1<- AAAA (hi)
- orr wINST, w0, w1, lsl #16 // wINST<- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
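A minimal C++ sketch of the offset computation this deleted goto/32 handler performs (names are illustrative, not from the ART sources):

#include <cstdint>

// goto/32 +AAAAAAAA: the two code units aaaa (low) and AAAA (high) form a
// signed 32-bit code-unit offset; the byte offset is twice that. An offset of
// zero (a branch to itself) is legal only for goto/32, which is why the
// handler's backward-branch check must treat 0 as a taken backward branch.
int64_t Goto32ByteOffset(uint16_t aaaa, uint16_t AAAA) {
  const int32_t code_units =
      static_cast<int32_t>(aaaa | (static_cast<uint32_t>(AAAA) << 16));
  return static_cast<int64_t>(code_units) * 2;  // a code unit is 2 bytes
}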
diff --git a/runtime/interpreter/mterp/arm64/op_if_eq.S b/runtime/interpreter/mterp/arm64/op_if_eq.S
deleted file mode 100644
index aa4a0f1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_eqz.S b/runtime/interpreter/mterp/arm64/op_if_eqz.S
deleted file mode 100644
index 47c1dee..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"cbz w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ge.S b/runtime/interpreter/mterp/arm64/op_if_ge.S
deleted file mode 100644
index d6ec761..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gez.S b/runtime/interpreter/mterp/arm64/op_if_gez.S
deleted file mode 100644
index 087e094..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"tbz w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gt.S b/runtime/interpreter/mterp/arm64/op_if_gt.S
deleted file mode 100644
index 7db8e9d..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_gtz.S b/runtime/interpreter/mterp/arm64/op_if_gtz.S
deleted file mode 100644
index 476b265..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "branch":"b.gt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_le.S b/runtime/interpreter/mterp/arm64/op_if_le.S
deleted file mode 100644
index ca3a83f..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lez.S b/runtime/interpreter/mterp/arm64/op_if_lez.S
deleted file mode 100644
index 2717a60..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "branch":"b.le" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_lt.S b/runtime/interpreter/mterp/arm64/op_if_lt.S
deleted file mode 100644
index 56450a1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ltz.S b/runtime/interpreter/mterp/arm64/op_if_ltz.S
deleted file mode 100644
index 86089c1..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"tbnz w2, #31," }
diff --git a/runtime/interpreter/mterp/arm64/op_if_ne.S b/runtime/interpreter/mterp/arm64/op_if_ne.S
deleted file mode 100644
index 14d9e13..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/arm64/op_if_nez.S b/runtime/interpreter/mterp/arm64/op_if_nez.S
deleted file mode 100644
index efacc88..0000000
--- a/runtime/interpreter/mterp/arm64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/zcmp.S" { "compare":"0", "branch":"cbnz w2," }
diff --git a/runtime/interpreter/mterp/arm64/op_iget.S b/runtime/interpreter/mterp/arm64/op_iget.S
deleted file mode 100644
index cb453ac..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "extend":"", "is_object":"0", "helper":"MterpIGetU32"}
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl $helper
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- $extend
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if $is_object
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean.S b/runtime/interpreter/mterp/arm64/op_iget_boolean.S
deleted file mode 100644
index 3b17144..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetU8", "extend":"uxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
deleted file mode 100644
index 2ceccb9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte.S b/runtime/interpreter/mterp/arm64/op_iget_byte.S
deleted file mode 100644
index d5ef1d3..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetI8", "extend":"sxtb w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
deleted file mode 100644
index 6e97b72..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrsb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char.S b/runtime/interpreter/mterp/arm64/op_iget_char.S
deleted file mode 100644
index 68e1435..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetU16", "extend":"uxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_char_quick.S b/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
deleted file mode 100644
index 325dd1c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object.S b/runtime/interpreter/mterp/arm64/op_iget_object.S
deleted file mode 100644
index 40ddadd..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_object_quick.S b/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
deleted file mode 100644
index e9a797d..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_object_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- EXPORT_PC
- GET_VREG w0, w2 // w0<- object we're operating on
- bl artIGetObjectFromMterp // (obj, offset)
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz w3, MterpPossibleException // bail out
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_quick.S b/runtime/interpreter/mterp/arm64/op_iget_quick.S
deleted file mode 100644
index 699b2c4..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"ldr", "extend":"" }
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- $load w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $extend
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
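The quickened iget path above skips field resolution entirely; a hedged C++ sketch of the same idea, using stand-in types rather than the real ART mirror classes:

#include <cstdint>
#include <cstring>

// iget-quick vA, vB, offset: vB holds the object reference and the second
// code unit is the field's byte offset, so the handler only null-checks the
// object and loads at that offset (the cbz / $load pair above).
bool IGetQuick(const uint8_t* object, uint16_t byte_offset, int32_t* out) {
  if (object == nullptr) {
    return false;  // corresponds to the common_errNullObject branch
  }
  std::memcpy(out, object + byte_offset, sizeof(*out));
  return true;  // *out is then written back into vA
}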
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short.S b/runtime/interpreter/mterp/arm64/op_iget_short.S
deleted file mode 100644
index 714f4b9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget.S" { "helper":"MterpIGetI16", "extend":"sxth w0, w0" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_short_quick.S b/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
deleted file mode 100644
index 8367070..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iget_quick.S" { "load":"ldrsh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide.S b/runtime/interpreter/mterp/arm64/op_iget_wide.S
deleted file mode 100644
index 4fc735c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetU64
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cmp w3, #0
- cbnz w3, MterpException // bail out
- SET_VREG_WIDE x0, w2
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
deleted file mode 100644
index e9388e4..0000000
--- a/runtime/interpreter/mterp/arm64/op_iget_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* iget-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w4, 1 // w4<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldr x0, [x3, x4] // x0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG_WIDE x0, w2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_instance_of.S b/runtime/interpreter/mterp/arm64/op_instance_of.S
deleted file mode 100644
index a56705a..0000000
--- a/runtime/interpreter/mterp/arm64/op_instance_of.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- EXPORT_PC
- FETCH w0, 1 // w0<- CCCC
- lsr w1, wINST, #12 // w1<- B
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpInstanceOf // (index, &obj, method, self)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x1, MterpException
- ADVANCE 2 // advance rPC
- SET_VREG w0, w2 // vA<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_byte.S b/runtime/interpreter/mterp/arm64/op_int_to_byte.S
deleted file mode 100644
index 43f8148..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_char.S b/runtime/interpreter/mterp/arm64/op_int_to_char.S
deleted file mode 100644
index f092170..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_double.S b/runtime/interpreter/mterp/arm64/op_int_to_double.S
deleted file mode 100644
index 3dee75a..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWider.S" {"instr":"scvtf d0, w0", "srcreg":"w0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_float.S b/runtime/interpreter/mterp/arm64/op_int_to_float.S
deleted file mode 100644
index 3ebbdc7..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrow.S" {"instr":"scvtf s0, w0", "srcreg":"w0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_long.S b/runtime/interpreter/mterp/arm64/op_int_to_long.S
deleted file mode 100644
index 45e3112..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* int-to-long vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // fp[A]<- x0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_int_to_short.S b/runtime/interpreter/mterp/arm64/op_int_to_short.S
deleted file mode 100644
index 87fb804..0000000
--- a/runtime/interpreter/mterp/arm64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom.S b/runtime/interpreter/mterp/arm64/op_invoke_custom.S
deleted file mode 100644
index 3686584..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S b/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
deleted file mode 100644
index 06de86a..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct.S b/runtime/interpreter/mterp/arm64/op_invoke_direct.S
deleted file mode 100644
index c117232..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S b/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
deleted file mode 100644
index efc54c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface.S b/runtime/interpreter/mterp/arm64/op_invoke_interface.S
deleted file mode 100644
index 12dfa59..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeInterface" }
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S b/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
deleted file mode 100644
index 61caaf4..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
deleted file mode 100644
index aace98f..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 30c8c09..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static.S b/runtime/interpreter/mterp/arm64/op_invoke_static.S
deleted file mode 100644
index 634eda2..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_static_range.S b/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
deleted file mode 100644
index 32cdcdd..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super.S b/runtime/interpreter/mterp/arm64/op_invoke_super.S
deleted file mode 100644
index def2c55..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeSuper" }
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_super_range.S b/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
deleted file mode 100644
index 27fb859..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
deleted file mode 100644
index 66d0502..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtual" }
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
deleted file mode 100644
index 4300c34..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
deleted file mode 100644
index b43955c..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 90c7b65..0000000
--- a/runtime/interpreter/mterp/arm64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput.S b/runtime/interpreter/mterp/arm64/op_iput.S
deleted file mode 100644
index 5e21d5c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIPutU32" }
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern $helper
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl $helper
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean.S b/runtime/interpreter/mterp/arm64/op_iput_boolean.S
deleted file mode 100644
index 12a278c..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
deleted file mode 100644
index 25c61d7..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte.S b/runtime/interpreter/mterp/arm64/op_iput_byte.S
deleted file mode 100644
index 82b99e9..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S b/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
deleted file mode 100644
index 25c61d7..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strb" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char.S b/runtime/interpreter/mterp/arm64/op_iput_char.S
deleted file mode 100644
index 427d92d..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_char_quick.S b/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
deleted file mode 100644
index c6ef46a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object.S b/runtime/interpreter/mterp/arm64/op_iput_object.S
deleted file mode 100644
index 0c0441a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_object.S
+++ /dev/null
@@ -1,10 +0,0 @@
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpIPutObj
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_object_quick.S b/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
deleted file mode 100644
index 6fbf2b1..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_object_quick.S
+++ /dev/null
@@ -1,9 +0,0 @@
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpIputObjectQuick
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_quick.S b/runtime/interpreter/mterp/arm64/op_iput_quick.S
deleted file mode 100644
index e95da76..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "store":"str" }
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $store w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short.S b/runtime/interpreter/mterp/arm64/op_iput_short.S
deleted file mode 100644
index 67f1ace..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_short_quick.S b/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
deleted file mode 100644
index c6ef46a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_iput_quick.S" { "store":"strh" }
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S
deleted file mode 100644
index be6aeb0..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* iput-wide vA, vB, field//CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU64
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
deleted file mode 100644
index 28e831a..0000000
--- a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* iput-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w3, 1 // w3<- field byte offset
- GET_VREG w2, w2 // w2<- fp[B], the object pointer
- ubfx w0, wINST, #8, #4 // w0<- A
- cbz w2, common_errNullObject // object was null
- GET_VREG_WIDE x0, w0 // x0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- str x0, [x2, x3] // obj.field<- x0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_double.S b/runtime/interpreter/mterp/arm64/op_long_to_double.S
deleted file mode 100644
index a3f59c2..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopWide.S" {"instr":"scvtf d0, x0", "srcreg":"x0", "tgtreg":"d0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_float.S b/runtime/interpreter/mterp/arm64/op_long_to_float.S
deleted file mode 100644
index e9c9145..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/funopNarrower.S" {"instr":"scvtf s0, x0", "srcreg":"x0", "tgtreg":"s0"}
diff --git a/runtime/interpreter/mterp/arm64/op_long_to_int.S b/runtime/interpreter/mterp/arm64/op_long_to_int.S
deleted file mode 100644
index 73f58d8..0000000
--- a/runtime/interpreter/mterp/arm64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "arm64/op_move.S"
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_enter.S b/runtime/interpreter/mterp/arm64/op_monitor_enter.S
deleted file mode 100644
index 6fbd9ae..0000000
--- a/runtime/interpreter/mterp/arm64/op_monitor_enter.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w1<- self
- bl artLockObjectFromCode
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_monitor_exit.S b/runtime/interpreter/mterp/arm64/op_monitor_exit.S
deleted file mode 100644
index 26e2d8d..0000000
--- a/runtime/interpreter/mterp/arm64/op_monitor_exit.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // x1<- self
- bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move.S b/runtime/interpreter/mterp/arm64/op_move.S
deleted file mode 100644
index 195b7eb..0000000
--- a/runtime/interpreter/mterp/arm64/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_16.S b/runtime/interpreter/mterp/arm64/op_move_16.S
deleted file mode 100644
index 5146e3d..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_exception.S b/runtime/interpreter/mterp/arm64/op_move_exception.S
deleted file mode 100644
index b29298f..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-exception vAA */
- lsr w2, wINST, #8 // w2<- AA
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- mov x1, #0 // w1<- 0
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
- GET_INST_OPCODE ip // extract opcode from rINST
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
- GOTO_OPCODE ip // jump to next instruction
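A small C++ sketch of what the deleted move-exception handler does with the thread's pending-exception slot (the struct and field below are stand-ins, not the real ART Thread layout):

struct Thread {
  void* exception;  // stand-in for the slot at THREAD_EXCEPTION_OFFSET
};

// move-exception vAA: copy the pending exception reference into vAA and clear
// the thread-local slot, mirroring the ldr/str pair above.
void* MoveException(Thread* self) {
  void* obj = self->exception;
  self->exception = nullptr;
  return obj;  // stored into vAA as an object reference
}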
diff --git a/runtime/interpreter/mterp/arm64/op_move_from16.S b/runtime/interpreter/mterp/arm64/op_move_from16.S
deleted file mode 100644
index 78f344d..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_object.S b/runtime/interpreter/mterp/arm64/op_move_object.S
deleted file mode 100644
index a5adc59..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_16.S b/runtime/interpreter/mterp/arm64/op_move_object_16.S
deleted file mode 100644
index ef86c45..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_object_from16.S b/runtime/interpreter/mterp/arm64/op_move_object_from16.S
deleted file mode 100644
index 0c73b3b..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result.S b/runtime/interpreter/mterp/arm64/op_move_result.S
deleted file mode 100644
index 06fe962..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if $is_object
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_object.S b/runtime/interpreter/mterp/arm64/op_move_result_object.S
deleted file mode 100644
index da2bbee..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_move_result_wide.S b/runtime/interpreter/mterp/arm64/op_move_result_wide.S
deleted file mode 100644
index f90a33f..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_result_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* for: move-result-wide */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr x0, [x0] // x0 <- result (wide)
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, x2 // fp[AA]<- r0
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide.S b/runtime/interpreter/mterp/arm64/op_move_wide.S
deleted file mode 100644
index 538f079..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lsr w3, wINST, #12 // w3<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_16.S b/runtime/interpreter/mterp/arm64/op_move_wide_16.S
deleted file mode 100644
index c79cdc50..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 2 // w3<- BBBB
- FETCH w2, 1 // w2<- AAAA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- SET_VREG_WIDE x3, w2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_move_wide_from16.S b/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
deleted file mode 100644
index 70dbe99..0000000
--- a/runtime/interpreter/mterp/arm64/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 1 // w3<- BBBB
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double.S b/runtime/interpreter/mterp/arm64/op_mul_double.S
deleted file mode 100644
index 8d35b81..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fmul d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
deleted file mode 100644
index 526cb3b..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fmul d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float.S b/runtime/interpreter/mterp/arm64/op_mul_float.S
deleted file mode 100644
index eea7733..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fmul s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
deleted file mode 100644
index c1f2376..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fmul s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int.S b/runtime/interpreter/mterp/arm64/op_mul_int.S
deleted file mode 100644
index d14cae1..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binop.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
deleted file mode 100644
index f079118..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_2addr.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binop2addr.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
deleted file mode 100644
index a378559..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_lit16.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binopLit16.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S b/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
deleted file mode 100644
index b3d4014..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_int_lit8.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-%include "arm64/binopLit8.S" {"instr":"mul w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long.S b/runtime/interpreter/mterp/arm64/op_mul_long.S
deleted file mode 100644
index bc0dcbd..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"mul x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
deleted file mode 100644
index fa1cdf8..0000000
--- a/runtime/interpreter/mterp/arm64/op_mul_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"mul x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_double.S b/runtime/interpreter/mterp/arm64/op_neg_double.S
deleted file mode 100644
index d77859d..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"eor x0, x0, #0x8000000000000000"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_float.S b/runtime/interpreter/mterp/arm64/op_neg_float.S
deleted file mode 100644
index 6652aec..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"eor w0, w0, #0x80000000"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_int.S b/runtime/interpreter/mterp/arm64/op_neg_int.S
deleted file mode 100644
index 59c14a9..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"sub w0, wzr, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_neg_long.S b/runtime/interpreter/mterp/arm64/op_neg_long.S
deleted file mode 100644
index 0c71ea7..0000000
--- a/runtime/interpreter/mterp/arm64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"sub x0, xzr, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_new_array.S b/runtime/interpreter/mterp/arm64/op_new_array.S
deleted file mode 100644
index 886120a..0000000
--- a/runtime/interpreter/mterp/arm64/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_new_instance.S b/runtime/interpreter/mterp/arm64/op_new_instance.S
deleted file mode 100644
index c171ac5..0000000
--- a/runtime/interpreter/mterp/arm64/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xSELF
- mov w2, wINST
- bl MterpNewInstance // (shadow_frame, self, inst_data)
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_nop.S b/runtime/interpreter/mterp/arm64/op_nop.S
deleted file mode 100644
index 80c2d45..0000000
--- a/runtime/interpreter/mterp/arm64/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
- FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
- GET_INST_OPCODE ip // ip<- opcode from rINST
- GOTO_OPCODE ip // execute it
diff --git a/runtime/interpreter/mterp/arm64/op_not_int.S b/runtime/interpreter/mterp/arm64/op_not_int.S
deleted file mode 100644
index 55d7750..0000000
--- a/runtime/interpreter/mterp/arm64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unop.S" {"instr":"mvn w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_not_long.S b/runtime/interpreter/mterp/arm64/op_not_long.S
deleted file mode 100644
index e5ebdd6..0000000
--- a/runtime/interpreter/mterp/arm64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unopWide.S" {"instr":"mvn x0, x0"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int.S b/runtime/interpreter/mterp/arm64/op_or_int.S
deleted file mode 100644
index 648c1e6..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_2addr.S b/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
deleted file mode 100644
index abdf599..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit16.S b/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
deleted file mode 100644
index db7f4ff..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"orr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S b/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
deleted file mode 100644
index 7cb26b7..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"orr w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long.S b/runtime/interpreter/mterp/arm64/op_or_long.S
deleted file mode 100644
index dd137ce..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"orr x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_or_long_2addr.S b/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
deleted file mode 100644
index f785230..0000000
--- a/runtime/interpreter/mterp/arm64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"orr x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_packed_switch.S b/runtime/interpreter/mterp/arm64/op_packed_switch.S
deleted file mode 100644
index 408e030..0000000
--- a/runtime/interpreter/mterp/arm64/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl $func // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
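For context, a simplified C++ sketch of the lookup a packed-switch helper such as MterpDoPackedSwitch performs on the payload (layout per the dex format; this is not the ART implementation):

#include <cstdint>
#include <cstring>

// Packed-switch payload: ushort ident (0x0100), ushort size, int first_key,
// then 'size' signed 32-bit branch targets, relative to the switch opcode and
// expressed in code units. On no match, execution falls through to the next
// instruction, 3 code units past the switch.
int32_t DoPackedSwitch(const uint16_t* payload, int32_t test_value) {
  const uint16_t size = payload[1];
  int32_t first_key;
  std::memcpy(&first_key, &payload[2], sizeof(first_key));
  const int32_t index = test_value - first_key;
  if (index < 0 || index >= size) {
    return 3;  // fall through past the 3-code-unit switch instruction
  }
  int32_t target;
  std::memcpy(&target, &payload[4 + 2 * index], sizeof(target));
  return target;
}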
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double.S b/runtime/interpreter/mterp/arm64/op_rem_double.S
deleted file mode 100644
index c631ddb..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_double.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* rem vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d1, w2 // d1<- vCC
- GET_VREG_WIDE d0, w1 // d0<- vBB
- bl fmod
- lsr w4, wINST, #8 // w4<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
deleted file mode 100644
index 9868f41..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* rem vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- bl fmod
- ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float.S b/runtime/interpreter/mterp/arm64/op_rem_float.S
deleted file mode 100644
index 73f7060..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_float.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* EABI doesn't define a float remainder function, but libm does */
-%include "arm64/fbinop.S" {"instr":"bl fmodf"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
deleted file mode 100644
index 95f81c5..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* rem vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- bl fmodf
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w9
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int.S b/runtime/interpreter/mterp/arm64/op_rem_int.S
deleted file mode 100644
index dd9dfda..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"preinstr":"sdiv w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
deleted file mode 100644
index 57fc4971..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"preinstr":"sdiv w2, w0, w1", "instr":"msub w0, w2, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
deleted file mode 100644
index b51a739..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
deleted file mode 100644
index 03ea324..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"preinstr":"sdiv w3, w0, w1", "instr":"msub w0, w3, w1, w0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long.S b/runtime/interpreter/mterp/arm64/op_rem_long.S
deleted file mode 100644
index f133f86..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"preinstr":"sdiv x3, x1, x2","instr":"msub x0, x3, x2, x1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S b/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
deleted file mode 100644
index b45e2a9..0000000
--- a/runtime/interpreter/mterp/arm64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"preinstr":"sdiv x3, x0, x1", "instr":"msub x0, x3, x1, x0", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/arm64/op_return.S b/runtime/interpreter/mterp/arm64/op_return.S
deleted file mode 100644
index 9f125c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_return.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_object.S b/runtime/interpreter/mterp/arm64/op_return_object.S
deleted file mode 100644
index b6cb532..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_return.S"
diff --git a/runtime/interpreter/mterp/arm64/op_return_void.S b/runtime/interpreter/mterp/arm64/op_return_void.S
deleted file mode 100644
index b253006..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_void.S
+++ /dev/null
@@ -1,12 +0,0 @@
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- mov x0, #0
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
deleted file mode 100644
index c817169..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,10 +0,0 @@
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- mov x0, #0
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_return_wide.S b/runtime/interpreter/mterp/arm64/op_return_wide.S
deleted file mode 100644
index c47661c..0000000
--- a/runtime/interpreter/mterp/arm64/op_return_wide.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L${opcode}_check
-.L${opcode}_return:
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x0, w2 // x0<- vAA
- b MterpReturn
-.L${opcode}_check:
- bl MterpSuspendCheck // (self)
- b .L${opcode}_return
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int.S b/runtime/interpreter/mterp/arm64/op_rsub_int.S
deleted file mode 100644
index 3bf45fe..0000000
--- a/runtime/interpreter/mterp/arm64/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "arm64/binopLit16.S" {"instr":"sub w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
deleted file mode 100644
index 7a3572b..0000000
--- a/runtime/interpreter/mterp/arm64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"instr":"sub w0, w1, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget.S b/runtime/interpreter/mterp/arm64/op_sget.S
deleted file mode 100644
index 00b07fa..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32", "extend":"" }
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern $helper
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl $helper
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
- $extend
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if $is_object
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
diff --git a/runtime/interpreter/mterp/arm64/op_sget_boolean.S b/runtime/interpreter/mterp/arm64/op_sget_boolean.S
deleted file mode 100644
index 73f3a10..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetU8", "extend":"uxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_byte.S b/runtime/interpreter/mterp/arm64/op_sget_byte.S
deleted file mode 100644
index 38c0da6..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetI8", "extend":"sxtb w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_char.S b/runtime/interpreter/mterp/arm64/op_sget_char.S
deleted file mode 100644
index c0801bf..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetU16", "extend":"uxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_object.S b/runtime/interpreter/mterp/arm64/op_sget_object.S
deleted file mode 100644
index 69d6adb..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_short.S b/runtime/interpreter/mterp/arm64/op_sget_short.S
deleted file mode 100644
index 81e0434..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sget.S" {"helper":"MterpSGetI16", "extend":"sxth w0, w0"}
diff --git a/runtime/interpreter/mterp/arm64/op_sget_wide.S b/runtime/interpreter/mterp/arm64/op_sget_wide.S
deleted file mode 100644
index 546ab94..0000000
--- a/runtime/interpreter/mterp/arm64/op_sget_wide.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field//BBBB */
-
- .extern MterpSGetU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetU64
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w4, wINST, #8 // w4<- AA
- cbnz x3, MterpException // bail out
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG_WIDE x0, w4
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int.S b/runtime/interpreter/mterp/arm64/op_shl_int.S
deleted file mode 100644
index 3062a3f..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
deleted file mode 100644
index 9a7e09f..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
deleted file mode 100644
index 9c19b55..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"lsl w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long.S b/runtime/interpreter/mterp/arm64/op_shl_long.S
deleted file mode 100644
index bbf9600..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
deleted file mode 100644
index a5c4013..0000000
--- a/runtime/interpreter/mterp/arm64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"lsl"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int.S b/runtime/interpreter/mterp/arm64/op_shr_int.S
deleted file mode 100644
index 493b740..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
deleted file mode 100644
index 6efe8ee..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
deleted file mode 100644
index c7b61df..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"asr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long.S b/runtime/interpreter/mterp/arm64/op_shr_long.S
deleted file mode 100644
index 4d33235..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
deleted file mode 100644
index 0a4a386..0000000
--- a/runtime/interpreter/mterp/arm64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"asr"}
diff --git a/runtime/interpreter/mterp/arm64/op_sparse_switch.S b/runtime/interpreter/mterp/arm64/op_sparse_switch.S
deleted file mode 100644
index 5a8d748..0000000
--- a/runtime/interpreter/mterp/arm64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/arm64/op_sput.S b/runtime/interpreter/mterp/arm64/op_sput.S
deleted file mode 100644
index 7a0dc30..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"MterpSPutU32"}
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- EXPORT_PC
- FETCH w0, 1 // r0<- field ref BBBB
- lsr w3, wINST, #8 // r3<- AA
- GET_VREG w1, w3 // r1<= fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl $helper
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sput_boolean.S b/runtime/interpreter/mterp/arm64/op_sput_boolean.S
deleted file mode 100644
index 3d0c7c0..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_byte.S b/runtime/interpreter/mterp/arm64/op_sput_byte.S
deleted file mode 100644
index 489cf92..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_char.S b/runtime/interpreter/mterp/arm64/op_sput_char.S
deleted file mode 100644
index f79d311..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_object.S b/runtime/interpreter/mterp/arm64/op_sput_object.S
deleted file mode 100644
index a649656..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_object.S
+++ /dev/null
@@ -1,10 +0,0 @@
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xINST
- mov x3, xSELF
- bl MterpSPutObj
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sput_short.S b/runtime/interpreter/mterp/arm64/op_sput_short.S
deleted file mode 100644
index 06482cd..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S
deleted file mode 100644
index 58b3c42..0000000
--- a/runtime/interpreter/mterp/arm64/op_sput_wide.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field//BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutU64
- cbnz w0, MterpException // 0 on success, -1 on failure
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double.S b/runtime/interpreter/mterp/arm64/op_sub_double.S
deleted file mode 100644
index e8e3401..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"fsub d0, d1, d2", "result":"d0", "r1":"d1", "r2":"d2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
deleted file mode 100644
index ddab55e..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"fsub d0, d0, d1", "r0":"d0", "r1":"d1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float.S b/runtime/interpreter/mterp/arm64/op_sub_float.S
deleted file mode 100644
index 227b15f..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop.S" {"instr":"fsub s0, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
deleted file mode 100644
index 19ac8d5..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/fbinop2addr.S" {"instr":"fsub s2, s0, s1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int.S b/runtime/interpreter/mterp/arm64/op_sub_int.S
deleted file mode 100644
index 0e7ce0e..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"sub w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
deleted file mode 100644
index d2c1bd3..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"sub w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long.S b/runtime/interpreter/mterp/arm64/op_sub_long.S
deleted file mode 100644
index 263c70d..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"sub x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S b/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
deleted file mode 100644
index 5be3772..0000000
--- a/runtime/interpreter/mterp/arm64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"sub x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/op_throw.S b/runtime/interpreter/mterp/arm64/op_throw.S
deleted file mode 100644
index 9a951af..0000000
--- a/runtime/interpreter/mterp/arm64/op_throw.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w1, w2 // r1<- vAA (exception object)
- cbz w1, common_errNullObject
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3e.S b/runtime/interpreter/mterp/arm64/op_unused_3e.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_3f.S b/runtime/interpreter/mterp/arm64/op_unused_3f.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_40.S b/runtime/interpreter/mterp/arm64/op_unused_40.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_41.S b/runtime/interpreter/mterp/arm64/op_unused_41.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_42.S b/runtime/interpreter/mterp/arm64/op_unused_42.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_43.S b/runtime/interpreter/mterp/arm64/op_unused_43.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_73.S b/runtime/interpreter/mterp/arm64/op_unused_73.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_79.S b/runtime/interpreter/mterp/arm64/op_unused_79.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_7a.S b/runtime/interpreter/mterp/arm64/op_unused_7a.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f3.S b/runtime/interpreter/mterp/arm64/op_unused_f3.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f4.S b/runtime/interpreter/mterp/arm64/op_unused_f4.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f5.S b/runtime/interpreter/mterp/arm64/op_unused_f5.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f6.S b/runtime/interpreter/mterp/arm64/op_unused_f6.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f7.S b/runtime/interpreter/mterp/arm64/op_unused_f7.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f8.S b/runtime/interpreter/mterp/arm64/op_unused_f8.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_f9.S b/runtime/interpreter/mterp/arm64/op_unused_f9.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fc.S b/runtime/interpreter/mterp/arm64/op_unused_fc.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_unused_fd.S b/runtime/interpreter/mterp/arm64/op_unused_fd.S
deleted file mode 100644
index 204ecef..0000000
--- a/runtime/interpreter/mterp/arm64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/unused.S"
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int.S b/runtime/interpreter/mterp/arm64/op_ushr_int.S
deleted file mode 100644
index 005452b..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
deleted file mode 100644
index 1cb8cb7..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
deleted file mode 100644
index 555ed4e..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"ubfx w1, w3, #8, #5", "instr":"lsr w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long.S b/runtime/interpreter/mterp/arm64/op_ushr_long.S
deleted file mode 100644
index e13c86a..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
deleted file mode 100644
index 67ec91e..0000000
--- a/runtime/interpreter/mterp/arm64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/shiftWide2addr.S" {"opcode":"lsr"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int.S b/runtime/interpreter/mterp/arm64/op_xor_int.S
deleted file mode 100644
index 7483663..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
deleted file mode 100644
index 2f9a2c7..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binop2addr.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
deleted file mode 100644
index 6b72c56..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit16.S" {"instr":"eor w0, w0, w1"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S b/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
deleted file mode 100644
index 1d3d93e..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopLit8.S" {"extract":"", "instr":"eor w0, w0, w3, asr #8"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long.S b/runtime/interpreter/mterp/arm64/op_xor_long.S
deleted file mode 100644
index 3880d5d..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide.S" {"instr":"eor x0, x1, x2"}
diff --git a/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S b/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
deleted file mode 100644
index 3690552..0000000
--- a/runtime/interpreter/mterp/arm64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "arm64/binopWide2addr.S" {"instr":"eor x0, x0, x1"}
diff --git a/runtime/interpreter/mterp/arm64/other.S b/runtime/interpreter/mterp/arm64/other.S
new file mode 100644
index 0000000..024a5c8
--- /dev/null
+++ b/runtime/interpreter/mterp/arm64/other.S
@@ -0,0 +1,355 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ FETCH w0, 1 // w0<- BBBB
+ lsr w1, wINST, #8 // w1<- AA
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl $helper // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 // load rINST
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 2 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+%def op_const():
+ /* const vAA, #+BBBBbbbb */
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH w0, 1 // w0<- bbbb (low)
+ FETCH w1, 2 // w1<- BBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_16():
+ /* const/16 vAA, #+BBBB */
+ FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_ADVANCE_INST 2 // advance xPC, load wINST
+ SET_VREG w0, w3 // vAA<- w0
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_4():
+ /* const/4 vA, #+B */
+ sbfx w1, wINST, #12, #4 // w1<- sssssssB
+ ubfx w0, wINST, #8, #4 // w0<- A
+ FETCH_ADVANCE_INST 1 // advance xPC, load wINST
+ GET_INST_OPCODE ip // ip<- opcode from xINST
+ SET_VREG w1, w0 // fp[A]<- w1
+ GOTO_OPCODE ip // execute next instruction
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
+ lsr w3, wINST, #8 // r3<- AA
+ lsl w0, w0, #16 // r0<- BBBB0000
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ SET_VREG w0, w3 // vAA<- r0
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, String//BBBBBBBB */
+ EXPORT_PC
+ FETCH w0, 1 // w0<- bbbb (low)
+ FETCH w2, 2 // w2<- BBBB (high)
+ lsr w1, wINST, #8 // w1<- AA
+ orr w0, w0, w2, lsl #16 // w0<- BBBBbbbb
+ add x2, xFP, #OFF_FP_SHADOWFRAME
+ mov x3, xSELF
+ bl MterpConstString // (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 // advance rPC
+ cbnz w0, MterpPossibleException // let reference interpreter deal with it.
+ ADVANCE 3 // advance rPC
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_wide():
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH w0, 1 // w0<- bbbb (low)
+ FETCH w1, 2 // w1<- BBBB (low middle)
+ FETCH w2, 3 // w2<- hhhh (high middle)
+ FETCH w3, 4 // w3<- HHHH (high)
+ lsr w4, wINST, #8 // r4<- AA
+ FETCH_ADVANCE_INST 5 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
+ orr x0, x0, x2, lsl #32 // w0<- hhhhBBBBbbbb
+ orr x0, x0, x3, lsl #48 // w0<- HHHHhhhhBBBBbbbb
+ SET_VREG_WIDE x0, w4
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S x0, 1 // x0<- ssssssssssssBBBB (sign-extended)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH w0, 1 // x0<- 000000000000bbbb (low)
+ lsr w3, wINST, #8 // w3<- AA
+ FETCH_S x2, 2 // x2<- ssssssssssssBBBB (high)
+ FETCH_ADVANCE_INST 3 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ orr x0, x0, x2, lsl #16 // x0<- ssssssssBBBBbbbb
+ SET_VREG_WIDE x0, w3
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
+ lsr w1, wINST, #8 // w1<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ lsl x0, x0, #48
+ SET_VREG_WIDE x0, w1
+ GET_INST_OPCODE ip // extract opcode from wINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_monitor_enter():
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+ mov x1, xSELF // w1<- self
+ bl artLockObjectFromCode
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_monitor_exit():
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG w0, w2 // w0<- vAA (object)
+ mov x1, xSELF // x1<- self
+ bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
+ cbnz w0, MterpException
+ FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ lsr w1, wINST, #12 // x1<- B from 15:12
+ ubfx w0, wINST, #8, #4 // x0<- A from 11:8
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_VREG w2, w1 // x2<- fp[B]
+ GET_INST_OPCODE ip // ip<- opcode from wINST
+ .if $is_object
+ SET_VREG_OBJECT w2, w0 // fp[A]<- x2
+ .else
+ SET_VREG w2, w0 // fp[A]<- x2
+ .endif
+ GOTO_OPCODE ip // execute next instruction
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH w1, 2 // w1<- BBBB
+ FETCH w0, 1 // w0<- AAAA
+ FETCH_ADVANCE_INST 3 // advance xPC, load xINST
+ GET_VREG w2, w1 // w2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from xINST
+ .if $is_object
+ SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
+ .else
+ SET_VREG w2, w0 // fp[AAAA]<- w2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_exception():
+ /* move-exception vAA */
+ lsr w2, wINST, #8 // w2<- AA
+ ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
+ mov x1, #0 // w1<- 0
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
+ GET_INST_OPCODE ip // extract opcode from rINST
+ str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH w1, 1 // r1<- BBBB
+ lsr w0, wINST, #8 // r0<- AA
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_VREG w2, w1 // r2<- fp[BBBB]
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if $is_object
+ SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
+ .else
+ SET_VREG w2, w0 // fp[AA]<- r2
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ lsr w2, wINST, #8 // r2<- AA
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+ ldr w0, [x0] // r0 <- result.i.
+ GET_INST_OPCODE ip // extract opcode from wINST
+ .if $is_object
+ SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
+ .else
+ SET_VREG w0, w2 // fp[AA]<- r0
+ .endif
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* for: move-result-wide */
+ /* op vAA */
+ lsr w2, wINST, #8 // r2<- AA
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
+ ldr x0, [x0] // r0 <- result.i.
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x0, x2 // fp[AA]<- r0
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lsr w3, wINST, #12 // w3<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 1 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 2 // w3<- BBBB
+ FETCH w2, 1 // w2<- AAAA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 3 // advance rPC, load rINST
+ SET_VREG_WIDE x3, w2
+ GET_INST_OPCODE ip // extract opcode from rINST
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH w3, 1 // w3<- BBBB
+ lsr w2, wINST, #8 // w2<- AA
+ GET_VREG_WIDE x3, w3
+ FETCH_ADVANCE_INST 2 // advance rPC, load wINST
+ GET_INST_OPCODE ip // extract opcode from wINST
+ SET_VREG_WIDE x3, w2
+ GOTO_OPCODE ip // jump to next instruction
+
+%def op_nop():
+ FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
+ GET_INST_OPCODE ip // ip<- opcode from rINST
+ GOTO_OPCODE ip // execute it
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_73():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
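[Editor's illustration, not part of the patch.] The new arm64/other.S above replaces the per-opcode %include files deleted earlier in this change with parameterized %def templates: a single definition such as op_move(is_object="0") is expanded once per concrete opcode, with $-variables substituted into the assembly text (op_move_object is simply op_move with is_object="1"). The Python sketch below is hypothetical; OP_MOVE_TEMPLATE and expand_op_move are illustrative names, not the actual generator API, which lives in gen-mterp.py / gen_setup.py.

    from string import Template

    # Hypothetical stand-in for one %def body from other.S; $is_object is the
    # only template parameter, exactly as in op_move(is_object="0").
    OP_MOVE_TEMPLATE = Template("""\
        /* op vA, vB */
        lsr     w1, wINST, #12              // w1<- B
        ubfx    w0, wINST, #8, #4           // w0<- A
        FETCH_ADVANCE_INST 1                // advance rPC, load wINST
        GET_VREG w2, w1                     // w2<- fp[B]
        GET_INST_OPCODE ip                  // ip<- opcode from wINST
        .if $is_object
        SET_VREG_OBJECT w2, w0              // fp[A]<- w2
        .else
        SET_VREG w2, w0                     // fp[A]<- w2
        .endif
        GOTO_OPCODE ip                      // execute next instruction
    """)

    def expand_op_move(is_object="0"):
        # Substitute the parameter into the template text, as the generator
        # does when op_move_object invokes op_move(is_object="1").
        return OP_MOVE_TEMPLATE.substitute(is_object=is_object)

    if __name__ == "__main__":
        print(expand_op_move(is_object="1"))   # expansion used for op_move_object

The .if/.else is then resolved by the assembler, so op_move and op_move_object share one source definition instead of two separate files.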
diff --git a/runtime/interpreter/mterp/arm64/shiftWide.S b/runtime/interpreter/mterp/arm64/shiftWide.S
deleted file mode 100644
index dcb2fb7..0000000
--- a/runtime/interpreter/mterp/arm64/shiftWide.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"opcode":"shl"}
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- $opcode x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
diff --git a/runtime/interpreter/mterp/arm64/shiftWide2addr.S b/runtime/interpreter/mterp/arm64/shiftWide2addr.S
deleted file mode 100644
index b860dfd..0000000
--- a/runtime/interpreter/mterp/arm64/shiftWide2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default {"opcode":"lsl"}
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- $opcode x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unop.S b/runtime/interpreter/mterp/arm64/unop.S
deleted file mode 100644
index e681968..0000000
--- a/runtime/interpreter/mterp/arm64/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- $instr // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unopWide.S b/runtime/interpreter/mterp/arm64/unopWide.S
deleted file mode 100644
index 6ee4f92..0000000
--- a/runtime/interpreter/mterp/arm64/unopWide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"instr":"sub x0, xzr, x0"}
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- $instr
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
diff --git a/runtime/interpreter/mterp/arm64/unused.S b/runtime/interpreter/mterp/arm64/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/arm64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/arm64/zcmp.S b/runtime/interpreter/mterp/arm64/zcmp.S
deleted file mode 100644
index 510a3c1..0000000
--- a/runtime/interpreter/mterp/arm64/zcmp.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "compare":"1" }
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
- FETCH_S wINST, 1 // w1<- branch offset, in code units
- .if ${compare}
- cmp w2, #0 // compare (vA, 0)
- .endif
- ${branch} MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
diff --git a/runtime/interpreter/mterp/common/gen_setup.py b/runtime/interpreter/mterp/common/gen_setup.py
new file mode 100644
index 0000000..cfa5e2e
--- /dev/null
+++ b/runtime/interpreter/mterp/common/gen_setup.py
@@ -0,0 +1,90 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Common global variables and helper methods for the in-memory python script.
+# The script starts with this file and is followed by the code generated from
+# the templated snippets. Those define all the helper functions used below.
+
+import sys, re
+from cStringIO import StringIO
+
+out = StringIO() # File-like in-memory buffer.
+handler_size_bytes = "MTERP_HANDLER_SIZE"
+handler_size_bits = "MTERP_HANDLER_SIZE_LOG2"
+opcode = ""
+opnum = ""
+
+def write_line(line):
+ out.write(line + "\n")
+
+def balign():
+ write_line(" .balign {}".format(handler_size_bytes))
+
+def write_opcode(num, name, write_method):
+ global opnum, opcode
+ opnum, opcode = str(num), name
+ write_line("/* ------------------------------ */")
+ balign()
+ write_line(".L_{1}: /* {0:#04x} */".format(num, name))
+ opcode_start()
+ opcode_pre()
+ write_method()
+ opcode_end()
+ write_line("")
+ opnum, opcode = None, None
+
+generated_helpers = {}
+
+# This method generates a helper using the provided writer method.
+# The output is temporarily redirected to in-memory buffer.
+def add_helper(write_helper, name = None):
+ if name == None:
+ name = "Mterp_" + opcode + "_helper"
+ global out
+ old_out = out
+ out = StringIO()
+ helper_start(name)
+ write_helper()
+ helper_end(name)
+ out.seek(0)
+ generated_helpers[name] = out.read()
+ out = old_out
+ return name
+
+def generate(output_filename):
+ out.seek(0)
+ out.truncate()
+ write_line("/* DO NOT EDIT: This file was generated by gen-mterp.py. */")
+ header()
+ entry()
+
+ instruction_start()
+ opcodes()
+ balign()
+ instruction_end()
+
+ for name, helper in sorted(generated_helpers.items()):
+ out.write(helper)
+ helpers()
+
+ footer()
+
+ out.seek(0)
+ # Squash consecutive empty lines.
+ text = re.sub(r"(\n\n)(\n)+", r"\1", out.read())
+ with open(output_filename, 'w') as output_file:
+ output_file.write(text)
+
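[Editor's illustration, not part of the patch.] gen_setup.py above is executed as the first part of an in-memory script; the code generated from the templated snippets follows it and must define header(), entry(), instruction_start()/instruction_end(), opcode_start()/opcode_pre()/opcode_end(), helper_start()/helper_end(), opcodes(), helpers() and footer() before generate() is called. The Python 2 driver below (Python 2 to match the cStringIO import) is a hedged sketch: the stub bodies, the output path, and the way gen-mterp.py actually wires this together are assumptions, but it shows the expected call order.

    # Hypothetical driver; stub implementations and paths are illustrative only.
    SETUP = open("runtime/interpreter/mterp/common/gen_setup.py").read()

    STUBS = '''
    def header():            write_line("/* file header */")
    def entry():             write_line("/* interpreter entry */")
    def instruction_start(): write_line("/* start of opcode handlers */")
    def instruction_end():   write_line("/* end of opcode handlers */")
    def helpers():           pass
    def footer():            write_line("/* common footer */")
    def opcode_start():      pass
    def opcode_pre():        pass
    def opcode_end():        pass
    def helper_start(name):  write_line(name + ":")
    def helper_end(name):    write_line("/* end of " + name + " */")

    def op_nop():
        write_line("    FETCH_ADVANCE_INST 1")
        write_line("    GET_INST_OPCODE ip")
        write_line("    GOTO_OPCODE ip")

    def opcodes():
        write_opcode(0x00, "op_nop", op_nop)   # one .balign'ed handler per opcode
    '''

    exec(SETUP + STUBS)        # build the single in-memory script described above
    generate("mterp_out.S")    # emit the combined, blank-line-squashed output

add_helper() follows the same pattern: it temporarily swaps the module-level out buffer for a fresh StringIO so per-opcode helpers can be collected and emitted after the main handler table.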
diff --git a/runtime/interpreter/mterp/config_arm b/runtime/interpreter/mterp/config_arm
deleted file mode 100644
index a45efd9..0000000
--- a/runtime/interpreter/mterp/config_arm
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for ARMv7-A targets.
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub arm/alt_stub.S
-
-# file header and basic definitions
-import arm/header.S
-
-# arch-specific entry point to interpreter
-import arm/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub arm/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start arm
- # (override example:) op op_sub_float_2addr arm-vfp
- # (fallback example:) op op_sub_float_2addr FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import arm/footer.S
diff --git a/runtime/interpreter/mterp/config_arm64 b/runtime/interpreter/mterp/config_arm64
deleted file mode 100644
index 590363f..0000000
--- a/runtime/interpreter/mterp/config_arm64
+++ /dev/null
@@ -1,306 +0,0 @@
-
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for ARM64
-#
-
-handler-style computed-goto
-handler-size 128
-
-# file header and basic definitions
-import arm64/header.S
-
-# arch-specific entry point to interpreter
-import arm64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub arm64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start arm64
- # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
- # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm; we emit the footer before alternate
-# entry stubs, so that TBZ/TBNZ from ops can reach targets in footer
-import arm64/footer.S
-
-# source for alternate entry stub
-asm-alt-stub arm64/alt_stub.S
-
-# emit alternate entry stubs
-alt-ops
-
-# finish by closing .cfi info
-import arm64/close_cfi.S
diff --git a/runtime/interpreter/mterp/config_mips b/runtime/interpreter/mterp/config_mips
deleted file mode 100644
index d6173da..0000000
--- a/runtime/interpreter/mterp/config_mips
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for MIPS_32 targets.
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub mips/alt_stub.S
-
-# file header and basic definitions
-import mips/header.S
-
-# arch-specific entry point to interpreter
-import mips/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub mips/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start mips
- # (override example:) op op_sub_float_2addr arm-vfp
- # (fallback example:) op op_sub_float_2addr FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import mips/footer.S
diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64
deleted file mode 100644
index a9bf362..0000000
--- a/runtime/interpreter/mterp/config_mips64
+++ /dev/null
@@ -1,298 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for MIPS_64
-#
-
-handler-style computed-goto
-handler-size 128
-
-# source for alternate entry stub
-asm-alt-stub mips64/alt_stub.S
-
-# file header and basic definitions
-import mips64/header.S
-
-# arch-specific entry point to interpreter
-import mips64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub mips64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start mips64
- # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
- # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import mips64/footer.S
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
deleted file mode 100644
index 2417851..0000000
--- a/runtime/interpreter/mterp/config_x86
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for X86
-#
-
-handler-style computed-goto
-handler-size 128
-
-function-type-format FUNCTION_TYPE(%s)
-function-size-format SIZE(%s,%s)
-global-name-format SYMBOL(%s)
-
-# source for alternate entry stub
-asm-alt-stub x86/alt_stub.S
-
-# file header and basic definitions
-import x86/header.S
-
-# arch-specific entry point to interpreter
-import x86/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub x86/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start x86
- # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
- # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import x86/footer.S
diff --git a/runtime/interpreter/mterp/config_x86_64 b/runtime/interpreter/mterp/config_x86_64
deleted file mode 100644
index 89fbf43..0000000
--- a/runtime/interpreter/mterp/config_x86_64
+++ /dev/null
@@ -1,302 +0,0 @@
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Configuration for X86_64
-#
-
-handler-style computed-goto
-handler-size 128
-
-function-type-format FUNCTION_TYPE(%s)
-function-size-format SIZE(%s,%s)
-global-name-format SYMBOL(%s)
-
-# source for alternate entry stub
-asm-alt-stub x86_64/alt_stub.S
-
-# file header and basic definitions
-import x86_64/header.S
-
-# arch-specific entry point to interpreter
-import x86_64/entry.S
-
-# Stub to switch to alternate interpreter
-fallback-stub x86_64/fallback.S
-
-# opcode list; argument to op-start is default directory
-op-start x86_64
- # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp
- # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK
-
- # op op_nop FALLBACK
- # op op_move FALLBACK
- # op op_move_from16 FALLBACK
- # op op_move_16 FALLBACK
- # op op_move_wide FALLBACK
- # op op_move_wide_from16 FALLBACK
- # op op_move_wide_16 FALLBACK
- # op op_move_object FALLBACK
- # op op_move_object_from16 FALLBACK
- # op op_move_object_16 FALLBACK
- # op op_move_result FALLBACK
- # op op_move_result_wide FALLBACK
- # op op_move_result_object FALLBACK
- # op op_move_exception FALLBACK
- # op op_return_void FALLBACK
- # op op_return FALLBACK
- # op op_return_wide FALLBACK
- # op op_return_object FALLBACK
- # op op_const_4 FALLBACK
- # op op_const_16 FALLBACK
- # op op_const FALLBACK
- # op op_const_high16 FALLBACK
- # op op_const_wide_16 FALLBACK
- # op op_const_wide_32 FALLBACK
- # op op_const_wide FALLBACK
- # op op_const_wide_high16 FALLBACK
- # op op_const_string FALLBACK
- # op op_const_string_jumbo FALLBACK
- # op op_const_class FALLBACK
- # op op_monitor_enter FALLBACK
- # op op_monitor_exit FALLBACK
- # op op_check_cast FALLBACK
- # op op_instance_of FALLBACK
- # op op_array_length FALLBACK
- # op op_new_instance FALLBACK
- # op op_new_array FALLBACK
- # op op_filled_new_array FALLBACK
- # op op_filled_new_array_range FALLBACK
- # op op_fill_array_data FALLBACK
- # op op_throw FALLBACK
- # op op_goto FALLBACK
- # op op_goto_16 FALLBACK
- # op op_goto_32 FALLBACK
- # op op_packed_switch FALLBACK
- # op op_sparse_switch FALLBACK
- # op op_cmpl_float FALLBACK
- # op op_cmpg_float FALLBACK
- # op op_cmpl_double FALLBACK
- # op op_cmpg_double FALLBACK
- # op op_cmp_long FALLBACK
- # op op_if_eq FALLBACK
- # op op_if_ne FALLBACK
- # op op_if_lt FALLBACK
- # op op_if_ge FALLBACK
- # op op_if_gt FALLBACK
- # op op_if_le FALLBACK
- # op op_if_eqz FALLBACK
- # op op_if_nez FALLBACK
- # op op_if_ltz FALLBACK
- # op op_if_gez FALLBACK
- # op op_if_gtz FALLBACK
- # op op_if_lez FALLBACK
- # op op_unused_3e FALLBACK
- # op op_unused_3f FALLBACK
- # op op_unused_40 FALLBACK
- # op op_unused_41 FALLBACK
- # op op_unused_42 FALLBACK
- # op op_unused_43 FALLBACK
- # op op_aget FALLBACK
- # op op_aget_wide FALLBACK
- # op op_aget_object FALLBACK
- # op op_aget_boolean FALLBACK
- # op op_aget_byte FALLBACK
- # op op_aget_char FALLBACK
- # op op_aget_short FALLBACK
- # op op_aput FALLBACK
- # op op_aput_wide FALLBACK
- # op op_aput_object FALLBACK
- # op op_aput_boolean FALLBACK
- # op op_aput_byte FALLBACK
- # op op_aput_char FALLBACK
- # op op_aput_short FALLBACK
- # op op_iget FALLBACK
- # op op_iget_wide FALLBACK
- # op op_iget_object FALLBACK
- # op op_iget_boolean FALLBACK
- # op op_iget_byte FALLBACK
- # op op_iget_char FALLBACK
- # op op_iget_short FALLBACK
- # op op_iput FALLBACK
- # op op_iput_wide FALLBACK
- # op op_iput_object FALLBACK
- # op op_iput_boolean FALLBACK
- # op op_iput_byte FALLBACK
- # op op_iput_char FALLBACK
- # op op_iput_short FALLBACK
- # op op_sget FALLBACK
- # op op_sget_wide FALLBACK
- # op op_sget_object FALLBACK
- # op op_sget_boolean FALLBACK
- # op op_sget_byte FALLBACK
- # op op_sget_char FALLBACK
- # op op_sget_short FALLBACK
- # op op_sput FALLBACK
- # op op_sput_wide FALLBACK
- # op op_sput_object FALLBACK
- # op op_sput_boolean FALLBACK
- # op op_sput_byte FALLBACK
- # op op_sput_char FALLBACK
- # op op_sput_short FALLBACK
- # op op_invoke_virtual FALLBACK
- # op op_invoke_super FALLBACK
- # op op_invoke_direct FALLBACK
- # op op_invoke_static FALLBACK
- # op op_invoke_interface FALLBACK
- # op op_return_void_no_barrier FALLBACK
- # op op_invoke_virtual_range FALLBACK
- # op op_invoke_super_range FALLBACK
- # op op_invoke_direct_range FALLBACK
- # op op_invoke_static_range FALLBACK
- # op op_invoke_interface_range FALLBACK
- # op op_unused_79 FALLBACK
- # op op_unused_7a FALLBACK
- # op op_neg_int FALLBACK
- # op op_not_int FALLBACK
- # op op_neg_long FALLBACK
- # op op_not_long FALLBACK
- # op op_neg_float FALLBACK
- # op op_neg_double FALLBACK
- # op op_int_to_long FALLBACK
- # op op_int_to_float FALLBACK
- # op op_int_to_double FALLBACK
- # op op_long_to_int FALLBACK
- # op op_long_to_float FALLBACK
- # op op_long_to_double FALLBACK
- # op op_float_to_int FALLBACK
- # op op_float_to_long FALLBACK
- # op op_float_to_double FALLBACK
- # op op_double_to_int FALLBACK
- # op op_double_to_long FALLBACK
- # op op_double_to_float FALLBACK
- # op op_int_to_byte FALLBACK
- # op op_int_to_char FALLBACK
- # op op_int_to_short FALLBACK
- # op op_add_int FALLBACK
- # op op_sub_int FALLBACK
- # op op_mul_int FALLBACK
- # op op_div_int FALLBACK
- # op op_rem_int FALLBACK
- # op op_and_int FALLBACK
- # op op_or_int FALLBACK
- # op op_xor_int FALLBACK
- # op op_shl_int FALLBACK
- # op op_shr_int FALLBACK
- # op op_ushr_int FALLBACK
- # op op_add_long FALLBACK
- # op op_sub_long FALLBACK
- # op op_mul_long FALLBACK
- # op op_div_long FALLBACK
- # op op_rem_long FALLBACK
- # op op_and_long FALLBACK
- # op op_or_long FALLBACK
- # op op_xor_long FALLBACK
- # op op_shl_long FALLBACK
- # op op_shr_long FALLBACK
- # op op_ushr_long FALLBACK
- # op op_add_float FALLBACK
- # op op_sub_float FALLBACK
- # op op_mul_float FALLBACK
- # op op_div_float FALLBACK
- # op op_rem_float FALLBACK
- # op op_add_double FALLBACK
- # op op_sub_double FALLBACK
- # op op_mul_double FALLBACK
- # op op_div_double FALLBACK
- # op op_rem_double FALLBACK
- # op op_add_int_2addr FALLBACK
- # op op_sub_int_2addr FALLBACK
- # op op_mul_int_2addr FALLBACK
- # op op_div_int_2addr FALLBACK
- # op op_rem_int_2addr FALLBACK
- # op op_and_int_2addr FALLBACK
- # op op_or_int_2addr FALLBACK
- # op op_xor_int_2addr FALLBACK
- # op op_shl_int_2addr FALLBACK
- # op op_shr_int_2addr FALLBACK
- # op op_ushr_int_2addr FALLBACK
- # op op_add_long_2addr FALLBACK
- # op op_sub_long_2addr FALLBACK
- # op op_mul_long_2addr FALLBACK
- # op op_div_long_2addr FALLBACK
- # op op_rem_long_2addr FALLBACK
- # op op_and_long_2addr FALLBACK
- # op op_or_long_2addr FALLBACK
- # op op_xor_long_2addr FALLBACK
- # op op_shl_long_2addr FALLBACK
- # op op_shr_long_2addr FALLBACK
- # op op_ushr_long_2addr FALLBACK
- # op op_add_float_2addr FALLBACK
- # op op_sub_float_2addr FALLBACK
- # op op_mul_float_2addr FALLBACK
- # op op_div_float_2addr FALLBACK
- # op op_rem_float_2addr FALLBACK
- # op op_add_double_2addr FALLBACK
- # op op_sub_double_2addr FALLBACK
- # op op_mul_double_2addr FALLBACK
- # op op_div_double_2addr FALLBACK
- # op op_rem_double_2addr FALLBACK
- # op op_add_int_lit16 FALLBACK
- # op op_rsub_int FALLBACK
- # op op_mul_int_lit16 FALLBACK
- # op op_div_int_lit16 FALLBACK
- # op op_rem_int_lit16 FALLBACK
- # op op_and_int_lit16 FALLBACK
- # op op_or_int_lit16 FALLBACK
- # op op_xor_int_lit16 FALLBACK
- # op op_add_int_lit8 FALLBACK
- # op op_rsub_int_lit8 FALLBACK
- # op op_mul_int_lit8 FALLBACK
- # op op_div_int_lit8 FALLBACK
- # op op_rem_int_lit8 FALLBACK
- # op op_and_int_lit8 FALLBACK
- # op op_or_int_lit8 FALLBACK
- # op op_xor_int_lit8 FALLBACK
- # op op_shl_int_lit8 FALLBACK
- # op op_shr_int_lit8 FALLBACK
- # op op_ushr_int_lit8 FALLBACK
- # op op_iget_quick FALLBACK
- # op op_iget_wide_quick FALLBACK
- # op op_iget_object_quick FALLBACK
- # op op_iput_quick FALLBACK
- # op op_iput_wide_quick FALLBACK
- # op op_iput_object_quick FALLBACK
- # op op_invoke_virtual_quick FALLBACK
- # op op_invoke_virtual_range_quick FALLBACK
- # op op_iput_boolean_quick FALLBACK
- # op op_iput_byte_quick FALLBACK
- # op op_iput_char_quick FALLBACK
- # op op_iput_short_quick FALLBACK
- # op op_iget_boolean_quick FALLBACK
- # op op_iget_byte_quick FALLBACK
- # op op_iget_char_quick FALLBACK
- # op op_iget_short_quick FALLBACK
- # op op_unused_f3 FALLBACK
- # op op_unused_f4 FALLBACK
- # op op_unused_f5 FALLBACK
- # op op_unused_f6 FALLBACK
- # op op_unused_f7 FALLBACK
- # op op_unused_f8 FALLBACK
- # op op_unused_f9 FALLBACK
- # op op_invoke_polymorphic FALLBACK
- # op op_invoke_polymorphic_range FALLBACK
- # op op_invoke_custom FALLBACK
- # op op_invoke_custom_range FALLBACK
- # op op_const_method_handle FALLBACK
- # op op_const_method_type FALLBACK
-op-end
-
-# common subroutines for asm
-import x86_64/footer.S
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index 75c5174..5d25955 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -14,605 +14,85 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-# Using instructions from an architecture-specific config file, generate C
-# and assembly source files for the Dalvik interpreter.
-#
+import sys, re, os
+from cStringIO import StringIO
-import sys, string, re, time
-from string import Template
+SCRIPT_DIR = os.path.dirname(sys.argv[0])
+# This file is included verbatim at the start of the in-memory python script.
+SCRIPT_SETUP_CODE = SCRIPT_DIR + "/common/gen_setup.py"
+INTERP_DEFS_FILE = SCRIPT_DIR + "/../../../libdexfile/dex/dex_instruction_list.h"
+NUM_PACKED_OPCODES = 256
-interp_defs_file = "../../../libdexfile/dex/dex_instruction_list.h" # need opcode list
-kNumPackedOpcodes = 256
-
-splitops = False
-verbose = False
-handler_size_bits = -1000
-handler_size_bytes = -1000
-in_op_start = 0 # 0=not started, 1=started, 2=ended
-in_alt_op_start = 0 # 0=not started, 1=started, 2=ended
-default_op_dir = None
-default_alt_stub = None
-opcode_locations = {}
-alt_opcode_locations = {}
-asm_stub_text = []
-fallback_stub_text = []
-label_prefix = ".L" # use ".L" to hide labels from gdb
-alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
-style = None # interpreter style
-generate_alt_table = False
-function_type_format = ".type %s, %%function"
-function_size_format = ".size %s, .-%s"
-global_name_format = "%s"
-
-# Exception class.
-class DataParseError(SyntaxError):
- "Failure when parsing data file"
-
-#
-# Set any omnipresent substitution values.
-#
-def getGlobalSubDict():
- return { "handler_size_bits":handler_size_bits,
- "handler_size_bytes":handler_size_bytes }
-
-#
-# Parse arch config file --
-# Set interpreter style.
-#
-def setHandlerStyle(tokens):
- global style
- if len(tokens) != 2:
- raise DataParseError("handler-style requires one argument")
- style = tokens[1]
- if style != "computed-goto":
- raise DataParseError("handler-style (%s) invalid" % style)
-
-#
-# Parse arch config file --
-# Set handler_size_bytes to the value of tokens[1], and handler_size_bits to
-# log2(handler_size_bytes). Throws an exception if "bytes" is not 0 or
-# a power of two.
-#
-def setHandlerSize(tokens):
- global handler_size_bits, handler_size_bytes
- if style != "computed-goto":
- print "Warning: handler-size valid only for computed-goto interpreters"
- if len(tokens) != 2:
- raise DataParseError("handler-size requires one argument")
- if handler_size_bits != -1000:
- raise DataParseError("handler-size may only be set once")
-
- # compute log2(n), and make sure n is 0 or a power of 2
- handler_size_bytes = bytes = int(tokens[1])
- bits = -1
- while bytes > 0:
- bytes //= 2 # halve with truncating division
- bits += 1
-
- if handler_size_bytes == 0 or handler_size_bytes != (1 << bits):
- raise DataParseError("handler-size (%d) must be power of 2" \
- % orig_bytes)
- handler_size_bits = bits
-
-#
-# Parse arch config file --
-# Copy a file in to asm output file.
-#
-def importFile(tokens):
- if len(tokens) != 2:
- raise DataParseError("import requires one argument")
- source = tokens[1]
- if source.endswith(".S"):
- appendSourceFile(tokens[1], getGlobalSubDict(), asm_fp, None)
- else:
- raise DataParseError("don't know how to import %s (expecting .cpp/.S)"
- % source)
-
-#
-# Parse arch config file --
-# Copy a file in to the C or asm output file.
-#
-def setAsmStub(tokens):
- global asm_stub_text
- if len(tokens) != 2:
- raise DataParseError("import requires one argument")
- try:
- stub_fp = open(tokens[1])
- asm_stub_text = stub_fp.readlines()
- except IOError, err:
- stub_fp.close()
- raise DataParseError("unable to load asm-stub: %s" % str(err))
- stub_fp.close()
-
-#
-# Parse arch config file --
-# Copy a file in to the C or asm output file.
-#
-def setFallbackStub(tokens):
- global fallback_stub_text
- if len(tokens) != 2:
- raise DataParseError("import requires one argument")
- try:
- stub_fp = open(tokens[1])
- fallback_stub_text = stub_fp.readlines()
- except IOError, err:
- stub_fp.close()
- raise DataParseError("unable to load fallback-stub: %s" % str(err))
- stub_fp.close()
-#
-# Parse arch config file --
-# Record location of default alt stub
-#
-def setAsmAltStub(tokens):
- global default_alt_stub, generate_alt_table
- if len(tokens) != 2:
- raise DataParseError("import requires one argument")
- default_alt_stub = tokens[1]
- generate_alt_table = True
-#
-# Change the default function type format
-#
-def setFunctionTypeFormat(tokens):
- global function_type_format
- function_type_format = tokens[1]
-#
-# Change the default function size format
-#
-def setFunctionSizeFormat(tokens):
- global function_size_format
- function_size_format = tokens[1]
-#
-# Change the global name format
-#
-def setGlobalNameFormat(tokens):
- global global_name_format
- global_name_format = tokens[1]
-#
-# Parse arch config file --
-# Start of opcode list.
-#
-def opStart(tokens):
- global in_op_start
- global default_op_dir
- if len(tokens) != 2:
- raise DataParseError("opStart takes a directory name argument")
- if in_op_start != 0:
- raise DataParseError("opStart can only be specified once")
- default_op_dir = tokens[1]
- in_op_start = 1
-
-#
-# Parse arch config file --
-# Set location of a single alt opcode's source file.
-#
-def altEntry(tokens):
- global generate_alt_table
- if len(tokens) != 3:
- raise DataParseError("alt requires exactly two arguments")
- if in_op_start != 1:
- raise DataParseError("alt statements must be between opStart/opEnd")
- try:
- index = opcodes.index(tokens[1])
- except ValueError:
- raise DataParseError("unknown opcode %s" % tokens[1])
- if alt_opcode_locations.has_key(tokens[1]):
- print "Note: alt overrides earlier %s (%s -> %s)" \
- % (tokens[1], alt_opcode_locations[tokens[1]], tokens[2])
- alt_opcode_locations[tokens[1]] = tokens[2]
- generate_alt_table = True
-
-#
-# Parse arch config file --
-# Set location of a single opcode's source file.
-#
-def opEntry(tokens):
- #global opcode_locations
- if len(tokens) != 3:
- raise DataParseError("op requires exactly two arguments")
- if in_op_start != 1:
- raise DataParseError("op statements must be between opStart/opEnd")
- try:
- index = opcodes.index(tokens[1])
- except ValueError:
- raise DataParseError("unknown opcode %s" % tokens[1])
- if opcode_locations.has_key(tokens[1]):
- print "Note: op overrides earlier %s (%s -> %s)" \
- % (tokens[1], opcode_locations[tokens[1]], tokens[2])
- opcode_locations[tokens[1]] = tokens[2]
-
-#
-# Parse arch config file --
-# End of opcode list; emit instruction blocks.
-#
-def opEnd(tokens):
- global in_op_start
- if len(tokens) != 1:
- raise DataParseError("opEnd takes no arguments")
- if in_op_start != 1:
- raise DataParseError("opEnd must follow opStart, and only appear once")
- in_op_start = 2
-
- loadAndEmitOpcodes()
- if splitops == False:
- if generate_alt_table:
- loadAndEmitAltOpcodes()
-
-def genaltop(tokens):
- if in_op_start != 2:
- raise DataParseError("alt-op can be specified only after op-end")
- if len(tokens) != 1:
- raise DataParseError("opEnd takes no arguments")
- if generate_alt_table:
- loadAndEmitAltOpcodes()
-
-#
# Extract an ordered list of instructions from the VM sources. We use the
-# "goto table" definition macro, which has exactly kNumPackedOpcodes
-# entries.
-#
+# "goto table" definition macro, which has exactly NUM_PACKED_OPCODES entries.
def getOpcodeList():
- opcodes = []
- opcode_fp = open(interp_defs_file)
- opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
- for line in opcode_fp:
- match = opcode_re.match(line)
- if not match:
- continue
- opcodes.append("op_" + match.group(2).lower())
- opcode_fp.close()
+ opcodes = []
+ opcode_fp = open(INTERP_DEFS_FILE)
+ opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)
+ for line in opcode_fp:
+ match = opcode_re.match(line)
+ if not match:
+ continue
+ opcodes.append("op_" + match.group(2).lower())
+ opcode_fp.close()
- if len(opcodes) != kNumPackedOpcodes:
- print "ERROR: found %d opcodes in Interp.h (expected %d)" \
- % (len(opcodes), kNumPackedOpcodes)
- raise SyntaxError, "bad opcode count"
- return opcodes
+ if len(opcodes) != NUM_PACKED_OPCODES:
+ print "ERROR: found %d opcodes in Interp.h (expected %d)" \
+ % (len(opcodes), NUM_PACKED_OPCODES)
+ raise SyntaxError, "bad opcode count"
+ return opcodes
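
The opcode_re above reads the V(...) rows of dex_instruction_list.h and turns the second field into an op_* name. A minimal, self-contained sketch of that extraction follows; the sample row is made up but follows the assumed V(hex, NAME, ...) shape, and only the first two fields matter to the regex:

import re

opcode_re = re.compile(r"^\s*V\((....), (\w+),.*", re.DOTALL)

# Hypothetical row in the style of dex_instruction_list.h.
row = '  V(0x1b, CONST_STRING_JUMBO, "const-string/jumbo", k31c, ...)'
match = opcode_re.match(row)
print("op_" + match.group(2).lower())   # -> op_const_string_jumbo
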
-def emitAlign():
- if style == "computed-goto":
- asm_fp.write(" .balign %d\n" % handler_size_bytes)
+indent_re = re.compile(r"^%( *)")
-#
-# Load and emit opcodes for all kNumPackedOpcodes instructions.
-#
-def loadAndEmitOpcodes():
- sister_list = []
- assert len(opcodes) == kNumPackedOpcodes
- need_dummy_start = False
+# Finds variable references in text: $foo or ${foo}
+escape_re = re.compile(r'''
+ (?<!\$) # Look-back: must not be preceded by another $.
+ \$
+ (\{)? # May be enclosed by { } pair.
+ (?P<name>\w+) # Save the symbol in named group.
+ (?(1)\}) # Expect } if and only if { was present.
+''', re.VERBOSE)
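
A quick, hypothetical illustration of what this pattern does and does not treat as a reference (same regex, written compactly without re.VERBOSE; the sample strings are made up):

import re

escape_re = re.compile(r"(?<!\$)\$(\{)?(?P<name>\w+)(?(1)\})")

for s in ["$opcode", "${opcode}_quick", "$$40"]:
  m = escape_re.search(s)
  print("%s -> %s" % (s, m.group("name") if m else None))
# $opcode         -> opcode
# ${opcode}_quick -> opcode
# $$40            -> None  ($$ is unescaped later by replace("$$", "$"))
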
- loadAndEmitGenericAsm("instruction_start")
+def generate_script(output_filename, input_filenames):
+ # Create new python script and write the initial setup code.
+ script = StringIO() # File-like in-memory buffer.
+ script.write("# DO NOT EDIT: This file was generated by gen-mterp.py.\n")
+ script.write(open(SCRIPT_SETUP_CODE, "r").read())
+ script.write("def opcodes():\n")
+ for i, opcode in enumerate(getOpcodeList()):
+ script.write(' write_opcode({0}, "{1}", {1})\n'.format(i, opcode))
- for i in xrange(kNumPackedOpcodes):
- op = opcodes[i]
+ # Read all template files and translate them into python code.
+ for input_filename in sorted(input_filenames):
+ lines = open(input_filename, "r").readlines()
+ indent = ""
+ for line in lines:
+ line = line.rstrip()
+ if line.startswith("%"):
+ script.write(line.lstrip("%") + "\n")
+ indent = indent_re.match(line).group(1)
+ if line.endswith(":"):
+ indent += " "
+ else:
+ line = escape_re.sub(r"''' + \g<name> + '''", line)
+ line = line.replace("\\", "\\\\")
+ line = line.replace("$$", "$")
+ script.write(indent + "write_line('''" + line + "''')\n")
+ script.write("\n")
- if opcode_locations.has_key(op):
- location = opcode_locations[op]
- else:
- location = default_op_dir
+ script.write("generate('''" + output_filename + "''')\n")
+ script.seek(0)
+ return script.read()
- if location == "FALLBACK":
- emitFallback(i)
- else:
- loadAndEmitAsm(location, i, sister_list)
+if len(sys.argv) <= 3:
+ print("Usage: output_file input_file(s)")
+ sys.exit(1)
- # For a 100% C implementation, there are no asm handlers or stubs. We
- # need to have the MterpAsmInstructionStart label point at op_nop, and it's
- # too annoying to try to slide it in after the alignment psuedo-op, so
- # we take the low road and just emit a dummy op_nop here.
- if need_dummy_start:
- emitAlign()
- asm_fp.write(label_prefix + "_op_nop: /* dummy */\n");
-
- emitAlign()
-
- loadAndEmitGenericAsm("instruction_end")
-
- if style == "computed-goto":
- emitSectionComment("Sister implementations", asm_fp)
- loadAndEmitGenericAsm("instruction_start_sister")
- asm_fp.writelines(sister_list)
- loadAndEmitGenericAsm("instruction_end_sister")
-
-#
-# Load an alternate entry stub
-#
-def loadAndEmitAltStub(source, opindex):
- op = opcodes[opindex]
- if verbose:
- print " alt emit %s --> stub" % source
- dict = getGlobalSubDict()
- dict.update({ "opcode":op, "opnum":opindex })
-
- emitAsmHeader(asm_fp, dict, alt_label_prefix)
- appendSourceFile(source, dict, asm_fp, None)
-
-#
-# Load and emit alternate opcodes for all kNumPackedOpcodes instructions.
-#
-def loadAndEmitAltOpcodes():
- assert len(opcodes) == kNumPackedOpcodes
- start_label = global_name_format % "artMterpAsmAltInstructionStart"
- end_label = global_name_format % "artMterpAsmAltInstructionEnd"
-
- loadAndEmitGenericAsm("instruction_start_alt")
-
- for i in xrange(kNumPackedOpcodes):
- op = opcodes[i]
- if alt_opcode_locations.has_key(op):
- source = "%s/alt_%s.S" % (alt_opcode_locations[op], op)
- else:
- source = default_alt_stub
- loadAndEmitAltStub(source, i)
-
- emitAlign()
-
- loadAndEmitGenericAsm("instruction_end_alt")
-
-#
-# Load an assembly fragment and emit it.
-#
-def loadAndEmitAsm(location, opindex, sister_list):
- op = opcodes[opindex]
- source = "%s/%s.S" % (location, op)
- dict = getGlobalSubDict()
- dict.update({ "opcode":op, "opnum":opindex })
- if verbose:
- print " emit %s --> asm" % source
-
- emitAsmHeader(asm_fp, dict, label_prefix)
- appendSourceFile(source, dict, asm_fp, sister_list)
-
-#
-# Load a non-handler assembly fragment and emit it.
-#
-def loadAndEmitGenericAsm(name):
- source = "%s/%s.S" % (default_op_dir, name)
- dict = getGlobalSubDict()
- appendSourceFile(source, dict, asm_fp, None)
-
-#
-# Emit fallback fragment
-#
-def emitFallback(opindex):
- op = opcodes[opindex]
- dict = getGlobalSubDict()
- dict.update({ "opcode":op, "opnum":opindex })
- emitAsmHeader(asm_fp, dict, label_prefix)
- for line in fallback_stub_text:
- asm_fp.write(line)
- asm_fp.write("\n")
-
-#
-# Output the alignment directive and label for an assembly piece.
-#
-def emitAsmHeader(outfp, dict, prefix):
- outfp.write("/* ------------------------------ */\n")
- # The alignment directive ensures that the handler occupies
- # at least the correct amount of space. We don't try to deal
- # with overflow here.
- emitAlign()
- # Emit a label so that gdb will say the right thing. We prepend an
- # underscore so the symbol name doesn't clash with the Opcode enum.
- outfp.write(prefix + "_%(opcode)s: /* 0x%(opnum)02x */\n" % dict)
-
-#
-# Output a generic instruction stub that updates the "glue" struct and
-# calls the C implementation.
-#
-def emitAsmStub(outfp, dict):
- emitAsmHeader(outfp, dict, label_prefix)
- for line in asm_stub_text:
- templ = Template(line)
- outfp.write(templ.substitute(dict))
-
-#
-# Append the file specified by "source" to the open "outfp". Each line will
-# be template-replaced using the substitution dictionary "dict".
-#
-# If the first line of the file starts with "%" it is taken as a directive.
-# A "%include" line contains a filename and, optionally, a Python-style
-# dictionary declaration with substitution strings. (This is implemented
-# with recursion.)
-#
-# If "sister_list" is provided, and we find a line that contains only "&",
-# all subsequent lines from the file will be appended to sister_list instead
-# of copied to the output.
-#
-# This may modify "dict".
-#
-def appendSourceFile(source, dict, outfp, sister_list):
- outfp.write("/* File: %s */\n" % source)
- infp = open(source, "r")
- in_sister = False
- for line in infp:
- if line.startswith("%include"):
- # Parse the "include" line
- tokens = line.strip().split(' ', 2)
- if len(tokens) < 2:
- raise DataParseError("malformed %%include in %s" % source)
-
- alt_source = tokens[1].strip("\"")
- if alt_source == source:
- raise DataParseError("self-referential %%include in %s"
- % source)
-
- new_dict = dict.copy()
- if len(tokens) == 3:
- new_dict.update(eval(tokens[2]))
- #print " including src=%s dict=%s" % (alt_source, new_dict)
- appendSourceFile(alt_source, new_dict, outfp, sister_list)
- continue
-
- elif line.startswith("%default"):
- # copy keywords into dictionary
- tokens = line.strip().split(' ', 1)
- if len(tokens) < 2:
- raise DataParseError("malformed %%default in %s" % source)
- defaultValues = eval(tokens[1])
- for entry in defaultValues:
- dict.setdefault(entry, defaultValues[entry])
- continue
-
- elif line.startswith("%break") and sister_list != None:
- # allow more than one %break, ignoring all following the first
- if style == "computed-goto" and not in_sister:
- in_sister = True
- sister_list.append("\n/* continuation for %(opcode)s */\n"%dict)
- continue
-
- # perform keyword substitution if a dictionary was provided
- if dict != None:
- templ = Template(line)
- try:
- subline = templ.substitute(dict)
- except KeyError, err:
- raise DataParseError("keyword substitution failed in %s: %s"
- % (source, str(err)))
- except:
- print "ERROR: substitution failed: " + line
- raise
- else:
- subline = line
-
- # write output to appropriate file
- if in_sister:
- sister_list.append(subline)
- else:
- outfp.write(subline)
- outfp.write("\n")
- infp.close()
-
-#
-# Emit a C-style section header comment.
-#
-def emitSectionComment(str, fp):
- equals = "========================================" \
- "==================================="
-
- fp.write("\n/*\n * %s\n * %s\n * %s\n */\n" %
- (equals, str, equals))
-
-
-#
-# ===========================================================================
-# "main" code
-#
-
-#
-# Check args.
-#
-if len(sys.argv) != 3:
- print "Usage: %s target-arch output-dir" % sys.argv[0]
- sys.exit(2)
-
-target_arch = sys.argv[1]
-output_dir = sys.argv[2]
-
-#
-# Extract opcode list.
-#
-opcodes = getOpcodeList()
-#for op in opcodes:
-# print " %s" % op
-
-#
-# Open config file.
-#
-try:
- config_fp = open("config_%s" % target_arch)
-except:
- print "Unable to open config file 'config_%s'" % target_arch
- sys.exit(1)
-
-#
-# Open and prepare output files.
-#
-try:
- asm_fp = open("%s/mterp_%s.S" % (output_dir, target_arch), "w")
-except:
- print "Unable to open output files"
- print "Make sure directory '%s' exists and existing files are writable" \
- % output_dir
- # Ideally we'd remove the files to avoid confusing "make", but if they
- # failed to open we probably won't be able to remove them either.
- sys.exit(1)
-
-print "Generating %s" % (asm_fp.name)
-
-file_header = """/*
- * This file was generated automatically by gen-mterp.py for '%s'.
- *
- * --> DO NOT EDIT <--
- */
-
-""" % (target_arch)
-
-asm_fp.write(file_header)
-
-#
-# Process the config file.
-#
-failed = False
-try:
- for line in config_fp:
- line = line.strip() # remove CRLF, leading spaces
- tokens = line.split(' ') # tokenize
- #print "%d: %s" % (len(tokens), tokens)
- if len(tokens[0]) == 0:
- #print " blank"
- pass
- elif tokens[0][0] == '#':
- #print " comment"
- pass
- else:
- if tokens[0] == "handler-size":
- setHandlerSize(tokens)
- elif tokens[0] == "import":
- importFile(tokens)
- elif tokens[0] == "asm-stub":
- setAsmStub(tokens)
- elif tokens[0] == "asm-alt-stub":
- setAsmAltStub(tokens)
- elif tokens[0] == "op-start":
- opStart(tokens)
- elif tokens[0] == "op-end":
- opEnd(tokens)
- elif tokens[0] == "alt":
- altEntry(tokens)
- elif tokens[0] == "op":
- opEntry(tokens)
- elif tokens[0] == "handler-style":
- setHandlerStyle(tokens)
- elif tokens[0] == "alt-ops":
- genaltop(tokens)
- elif tokens[0] == "split-ops":
- splitops = True
- elif tokens[0] == "fallback-stub":
- setFallbackStub(tokens)
- elif tokens[0] == "function-type-format":
- setFunctionTypeFormat(tokens)
- elif tokens[0] == "function-size-format":
- setFunctionSizeFormat(tokens)
- elif tokens[0] == "global-name-format":
- setGlobalNameFormat(tokens)
- else:
- raise DataParseError, "unrecognized command '%s'" % tokens[0]
- if style == None:
- print "tokens[0] = %s" % tokens[0]
- raise DataParseError, "handler-style must be first command"
-except DataParseError, err:
- print "Failed: " + str(err)
- # TODO: remove output files so "make" doesn't get confused
- failed = True
- asm_fp.close()
- asm_fp = None
-
-config_fp.close()
-
-#
-# Done!
-#
-if asm_fp:
- asm_fp.close()
-
-sys.exit(failed)
+# Generate the script and execute it.
+output_filename = sys.argv[1]
+input_filenames = sys.argv[2:]
+script_filename = output_filename + ".py"
+script = generate_script(output_filename, input_filenames)
+with open(script_filename, "w") as script_file:
+ script_file.write(script) # Write to disk for debugging.
+exec(compile(script, script_filename, mode='exec'))
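
SCRIPT_SETUP_CODE itself is not part of this diff, so the helpers the generated script relies on are visible only by name. The toy stand-in below is purely to illustrate the data flow; the real signatures of write_line, write_opcode and generate are assumptions:

_out = []

def write_line(s):
    # Template bodies emit the output .S one line at a time.
    _out.append(s)

def write_opcode(index, name, body_fn):
    # opcodes() calls this once per opcode, in opcode-number order.
    write_line("/* %s: 0x%02x */" % (name, index))
    body_fn()

def generate(filename):
    # Called last by the generated script to write out the collected text.
    with open(filename, "w") as f:
        f.write("\n".join(_out) + "\n")

The real setup code presumably also handles handler alignment, label prefixes and the alternate-opcode table, none of which this sketch attempts.
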
diff --git a/runtime/interpreter/mterp/mips/alt_stub.S b/runtime/interpreter/mterp/mips/alt_stub.S
deleted file mode 100644
index de13313..0000000
--- a/runtime/interpreter/mterp/mips/alt_stub.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (${opnum} * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
diff --git a/runtime/interpreter/mterp/mips/arithmetic.S b/runtime/interpreter/mterp/mips/arithmetic.S
new file mode 100644
index 0000000..9ae10f2
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/arithmetic.S
@@ -0,0 +1,803 @@
+%def binop(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+
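For orientation, a rough Python model of what a generated 32-bit binop handler does; only the control flow is meant to match, not the register allocation:

def binop_model(vregs, AA, BB, CC, op, chkzero=False):
    a0, a1 = vregs[BB], vregs[CC]                 # GET_VREG x2
    if chkzero and a1 == 0:
        raise ZeroDivisionError("common_errDivideByZero")
    r = op(a0, a1) & 0xFFFFFFFF                   # the MIPS ALU keeps 32 bits
    vregs[AA] = r - (1 << 32) if r & (1 << 31) else r   # store signed vAA
    return vregs[AA]
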
+%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
+
+%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ .if $chkzero
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
+
+%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+     * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+
+%def binopWide(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register pair other than a0-a1, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a2-a3). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64($arg0, $arg1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64($arg2, $arg3, t1) # a2/a3 <- vCC/vCC+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vAA/vAA+1 <- $result0/$result1
+
+%def binopWide2addr(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register pair other than a0-a1, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a2-a3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64($arg2, $arg3, a1) # a2/a3 <- vB/vB+1
+ LOAD64($arg0, $arg1, t0) # a0/a1 <- vA/vA+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- $result0/$result1
+
+%def unop(preinstr="", result0="a0", instr=""):
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result0 = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * neg-int, not-int, neg-float
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO($result0, t0, t1) # vA <- result0
+
+%def unopNarrower(load="LOAD64_F(fa0, fa0f, a3)", instr=""):
+ /*
+ * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
+ *
+ * For: double-to-float
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ $load
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
+
+%def unopWide(preinstr="", result0="a0", result1="a1", instr=""):
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result0/result1 = op a0/a1".
+     * This could be a MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double,
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- vA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
+
+%def unopWider(preinstr="", result0="a0", result1="a1", instr=""):
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result0/result1 = op a0".
+ *
+ * For: int-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
+
+%def op_add_int():
+% binop(instr="addu a0, a0, a1")
+
+%def op_add_int_2addr():
+% binop2addr(instr="addu a0, a0, a1")
+
+%def op_add_int_lit16():
+% binopLit16(instr="addu a0, a0, a1")
+
+%def op_add_int_lit8():
+% binopLit8(instr="addu a0, a0, a1")
+
+%def op_add_long():
+/*
+ * The compiler generates the following sequence for
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu a1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,a1
+ */
+% binopWide(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
+
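The sequence quoted above recovers the carry out of the low-word add with sltu: after the addu, v0 < a2 exactly when the addition wrapped. A quick Python check of that identity, with the 32-bit wraparound made explicit:

M = 1 << 32

def add64(x, y):
    a1, a0 = x >> 32, x & (M - 1)        # [a1 a0] = x
    a3, a2 = y >> 32, y & (M - 1)        # [a3 a2] = y
    v0 = (a2 + a0) % M                   # addu v0, a2, a0
    hi = (a3 + a1) % M                   # addu a1, a3, a1
    carry = 1 if v0 < a2 else 0          # sltu v1, v0, a2
    v1 = (hi + carry) % M                # addu v1, v1, a1
    return (v1 << 32) | v0

assert add64(0x00000001FFFFFFFF, 0x1) == 0x0000000200000000
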
+%def op_add_long_2addr():
+/*
+ * See op_add_long.S for details
+ */
+% binopWide2addr(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
+
+%def op_and_int():
+% binop(instr="and a0, a0, a1")
+
+%def op_and_int_2addr():
+% binop2addr(instr="and a0, a0, a1")
+
+%def op_and_int_lit16():
+% binopLit16(instr="and a0, a0, a1")
+
+%def op_and_int_lit8():
+% binopLit8(instr="and a0, a0, a1")
+
+%def op_and_long():
+% binopWide(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
+
+%def op_and_long_2addr():
+% binopWide2addr(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
+
+%def op_cmp_long():
+ /*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * I think I can improve on the ARM code by the following observation
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+     *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
+     *    subu  v0, t1, t0              # v0= -1:1:0 for [ < > = ]
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(a3, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ slt t0, a1, a3 # compare hi
+ sgt t1, a1, a3
+ subu v0, t1, t0 # v0 <- (-1, 1, 0)
+ bnez v0, .L${opcode}_finish
+ # at this point x.hi==y.hi
+ sltu t0, a0, a2 # compare lo
+ sgtu t1, a0, a2
+ subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
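The observation in the comment boils down to (x > y) - (x < y), which is -1, 0 or 1 directly; the signed high words decide first, and a tie falls through to an unsigned compare of the low words. In Python terms (illustrative only):

def cmp_long(x, y):
    x_hi, x_lo = x >> 32, x & 0xFFFFFFFF
    y_hi, y_lo = y >> 32, y & 0xFFFFFFFF
    v0 = (x_hi > y_hi) - (x_hi < y_hi)         # slt/sgt on hi (signed)
    if v0 == 0:                                # x.hi == y.hi
        v0 = (x_lo > y_lo) - (x_lo < y_lo)     # sltu/sgtu on lo (unsigned)
    return v0
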
+%def op_div_int():
+#ifdef MIPS32REVGE6
+% binop(instr="div a0, a0, a1", chkzero="1")
+#else
+% binop(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_2addr():
+#ifdef MIPS32REVGE6
+% binop2addr(instr="div a0, a0, a1", chkzero="1")
+#else
+% binop2addr(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_lit16():
+#ifdef MIPS32REVGE6
+% binopLit16(instr="div a0, a0, a1", chkzero="1")
+#else
+% binopLit16(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_int_lit8():
+#ifdef MIPS32REVGE6
+% binopLit8(instr="div a0, a0, a1", chkzero="1")
+#else
+% binopLit8(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
+#endif
+
+%def op_div_long():
+% binopWide(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
+
+%def op_div_long_2addr():
+% binopWide2addr(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
+
+%def op_int_to_byte():
+% unop(instr="SEB(a0, a0)")
+
+%def op_int_to_char():
+% unop(preinstr="", instr="and a0, 0xffff")
+
+%def op_int_to_long():
+% unopWider(instr="sra a1, a0, 31")
+
+%def op_int_to_short():
+% unop(instr="SEH(a0, a0)")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
+%def op_mul_int():
+% binop(instr="mul a0, a0, a1")
+
+%def op_mul_int_2addr():
+% binop2addr(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit16():
+% binopLit16(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit8():
+% binopLit8(instr="mul a0, a0, a1")
+
+%def op_mul_long():
+ /*
+ * Signed 64-bit integer multiply.
+ * a1 a0
+ * x a3 a2
+ * -------------
+ * a2a1 a2a0
+ * a3a0
+ * a3a1 (<= unused)
+ * ---------------
+ * v1 v0
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ and t0, a0, 255 # a2 <- BB
+ srl t1, a0, 8 # a3 <- CC
+ EAS2(t0, rFP, t0) # t0 <- &fp[BB]
+ LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t1, rFP, t1) # t0 <- &fp[CC]
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t0, a2, a1 # t0= a2a1
+ addu v1, v1, t1 # v1+= hi(a2a0)
+ addu v1, v1, t0 # v1= a3a0 + a2a1;
+
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ b .L${opcode}_finish
+%def op_mul_long_helper_code():
+
+.Lop_mul_long_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
+
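The partial-product diagram can be checked directly: only the low 32 bits of the two cross terms reach the high result word, and a3a1 never contributes to a 64-bit result at all. An illustrative Python check:

M = 1 << 32

def mul64(x, y):
    a1, a0 = x >> 32, x & (M - 1)
    a3, a2 = y >> 32, y & (M - 1)
    full = a2 * a0                        # multu a2, a0
    v0 = full & (M - 1)                   # mflo: low result word
    t1 = full >> 32                       # mfhi: carries into the high word
    v1 = (a3 * a0 + t1 + a2 * a1) % M     # mul, then the two addu
    return (v1 << 32) | v0

x, y = 0x123456789ABCDEF0, 0x0FEDCBA987654321
assert mul64(x, y) == (x * y) & ((1 << 64) - 1)
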
+%def op_mul_long_2addr():
+ /*
+ * See op_mul_long.S for more details
+ */
+ /* mul-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # vAA.low / high
+
+ GET_OPB(t1) # t1 <- B
+ EAS2(t1, rFP, t1) # t1 <- &fp[B]
+ LOAD64(a2, a3, t1) # vBB.low / high
+
+ mul v1, a3, a0 # v1= a3a0
+#ifdef MIPS32REVGE6
+ mulu v0, a2, a0 # v0= a2a0
+ muhu t1, a2, a0
+#else
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+#endif
+ mul t2, a2, a1 # t2= a2a1
+ addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
+ addu v1, v1, t2 # v1= v1 + a2a1;
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
+
+%def op_neg_int():
+% unop(instr="negu a0, a0")
+
+%def op_neg_long():
+% unopWide(result0="v0", result1="v1", preinstr="negu v0, a0", instr="negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0")
+
+%def op_not_int():
+% unop(instr="not a0, a0")
+
+%def op_not_long():
+% unopWide(preinstr="not a0, a0", instr="not a1, a1")
+
+%def op_or_int():
+% binop(instr="or a0, a0, a1")
+
+%def op_or_int_2addr():
+% binop2addr(instr="or a0, a0, a1")
+
+%def op_or_int_lit16():
+% binopLit16(instr="or a0, a0, a1")
+
+%def op_or_int_lit8():
+% binopLit8(instr="or a0, a0, a1")
+
+%def op_or_long():
+% binopWide(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
+
+%def op_or_long_2addr():
+% binopWide2addr(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
+
+%def op_rem_int():
+#ifdef MIPS32REVGE6
+% binop(instr="mod a0, a0, a1", chkzero="1")
+#else
+% binop(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_2addr():
+#ifdef MIPS32REVGE6
+% binop2addr(instr="mod a0, a0, a1", chkzero="1")
+#else
+% binop2addr(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_lit16():
+#ifdef MIPS32REVGE6
+% binopLit16(instr="mod a0, a0, a1", chkzero="1")
+#else
+% binopLit16(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_int_lit8():
+#ifdef MIPS32REVGE6
+% binopLit8(instr="mod a0, a0, a1", chkzero="1")
+#else
+% binopLit8(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
+#endif
+
+%def op_rem_long():
+% binopWide(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
+
+%def op_rem_long_2addr():
+% binopWide2addr(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+% binopLit16(instr="subu a0, a1, a0")
+
+%def op_rsub_int_lit8():
+% binopLit8(instr="subu a0, a1, a0")
+
+%def op_shl_int():
+% binop(instr="sll a0, a0, a1")
+
+%def op_shl_int_2addr():
+% binop2addr(instr="sll a0, a0, a1")
+
+%def op_shl_int_lit8():
+% binopLit8(instr="sll a0, a0, a1")
+
+%def op_shl_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t2) # t2 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+    andi    v1, a2, 0x20                          #  v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .L${opcode}_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
+%def op_shl_long_helper_code():
+
+.Lop_shl_long_finish:
+ SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
+
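In Python terms the split above looks like this (illustrative): when bit 0x20 of the shift amount is set, the low word becomes zero and the high word takes alo shifted by shift & 31; otherwise bits spill from the low word into the high word, and the srl-by-1-then-by-~shift trick makes that spill zero when shift & 31 is zero.

def shl64(ahi, alo, shift):
    shift &= 63
    if shift & 0x20:                                  # .L..._finish path
        return (alo << (shift & 31)) & 0xFFFFFFFF, 0  # rhi, rlo
    rlo = (alo << shift) & 0xFFFFFFFF
    spill = (alo >> (32 - shift)) if shift else 0     # srl a0, 1; srl a0, ~shift
    rhi = ((ahi << shift) | spill) & 0xFFFFFFFF
    return rhi, rlo
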
+%def op_shl_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
+ LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+    andi    v1, a2, 0x20                          #  v1 <- shift & 0x20
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ bnez v1, .L${opcode}_finish
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
+%def op_shl_long_2addr_helper_code():
+
+.Lop_shl_long_2addr_finish:
+ SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
+
+%def op_shr_int():
+% binop(instr="sra a0, a0, a1")
+
+%def op_shr_int_2addr():
+% binop2addr(instr="sra a0, a0, a1")
+
+%def op_shr_int_lit8():
+% binopLit8(instr="sra a0, a0, a1")
+
+%def op_shr_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t3) # t3 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v1
+%def op_shr_long_helper_code():
+
+.Lop_shr_long_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/VAA+1 <- rlo/rhi
+
+%def op_shr_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t2) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
+%def op_shr_long_2addr_helper_code():
+
+.Lop_shr_long_2addr_finish:
+ sra a3, a1, 31 # a3<- sign(ah)
+ SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
+
+%def op_sub_int():
+% binop(instr="subu a0, a0, a1")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="subu a0, a0, a1")
+
+%def op_sub_long():
+/*
+ * For little endian the code sequence looks as follows:
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu a0,a0,v0
+ * subu v1,v1,a0
+ */
+% binopWide(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
+
+%def op_sub_long_2addr():
+/*
+ * See op_sub_long.S for more details
+ */
+% binopWide2addr(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
+
+%def op_ushr_int():
+% binop(instr="srl a0, a0, a1")
+
+%def op_ushr_int_2addr():
+% binop2addr(instr="srl a0, a0, a1 ")
+
+%def op_ushr_int_lit8():
+% binopLit8(instr="srl a0, a0, a1")
+
+%def op_ushr_long():
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
+%def op_ushr_long_helper_code():
+
+.Lop_ushr_long_finish:
+ SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
+
+%def op_ushr_long_2addr():
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ GET_OPA4(t3) # t3 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t0, rFP, t3) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+
+ andi v0, a2, 0x20 # shift & 0x20
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ bnez v0, .L${opcode}_finish
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
+%def op_ushr_long_2addr_helper_code():
+
+.Lop_ushr_long_2addr_finish:
+ SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
+
+%def op_xor_int():
+% binop(instr="xor a0, a0, a1")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit8():
+% binopLit8(instr="xor a0, a0, a1")
+
+%def op_xor_long():
+% binopWide(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
+
+%def op_xor_long_2addr():
+% binopWide2addr(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
diff --git a/runtime/interpreter/mterp/mips/array.S b/runtime/interpreter/mterp/mips/array.S
new file mode 100644
index 0000000..57ab147
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/array.S
@@ -0,0 +1,239 @@
+%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $load a2, $data_offset(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
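The check order in the handler, in plain Python (illustrative): null first, then a single unsigned compare of index against length, which also rejects negative indices, then the typed element load.

def aget_model(array, index):
    if array is None:
        raise RuntimeError("common_errNullObject")
    if index < 0 or index >= len(array):   # bgeu: one unsigned compare
        raise IndexError("common_errArrayIndex")
    return array[index]                    # $load from $data_offset(a0)
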
+%def op_aget_boolean():
+% op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+%def op_aget_short():
+% op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
+
+%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1, 0) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_B(a3, 1, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ $store a2, $data_offset(a0) # vBB[vCC] <- a2
+ JR(t0) # jump to next instruction
+
+%def op_aput_boolean():
+% op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpAputObject)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_aput_short():
+% op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+ # compare unsigned index, length
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # a2/a3 <- vBB[vCC]
+ JR(t0) # jump to next instruction
+
+%def op_array_length():
+ /*
+ * Return the length of an array.
+ */
+ /* array-length vA, vB */
+ GET_OPB(a1) # a1 <- B
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a0, a1) # a0 <- vB (object ref)
+ # is object null?
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a3, a2, t0) # vA <- length
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a1, 1) # a1 <- bbbb (lo)
+ FETCH(a0, 2) # a0 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
+ GET_VREG(a0, a3) # a0 <- vAA (array object)
+ EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
+ JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
+ move a1, rPC
+ move a2, rSELF
+ JAL($helper) # v0 <- helper(shadow_frame, pc, self)
+ beqz v0, MterpPossibleException # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ JAL(MterpNewArray)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/bincmp.S b/runtime/interpreter/mterp/mips/bincmp.S
deleted file mode 100644
index 68df5c3..0000000
--- a/runtime/interpreter/mterp/mips/bincmp.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- b${condition} a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S
deleted file mode 100644
index 862d95a..0000000
--- a/runtime/interpreter/mterp/mips/binop.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if $chkzero
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
diff --git a/runtime/interpreter/mterp/mips/binop2addr.S b/runtime/interpreter/mterp/mips/binop2addr.S
deleted file mode 100644
index 17aa8eb..0000000
--- a/runtime/interpreter/mterp/mips/binop2addr.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if $chkzero
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit16.S b/runtime/interpreter/mterp/mips/binopLit16.S
deleted file mode 100644
index 0696e7a..0000000
--- a/runtime/interpreter/mterp/mips/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if $chkzero
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopLit8.S b/runtime/interpreter/mterp/mips/binopLit8.S
deleted file mode 100644
index 382dd2b..0000000
--- a/runtime/interpreter/mterp/mips/binopLit8.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if $chkzero
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
diff --git a/runtime/interpreter/mterp/mips/binopWide.S b/runtime/interpreter/mterp/mips/binopWide.S
deleted file mode 100644
index 604134d..0000000
--- a/runtime/interpreter/mterp/mips/binopWide.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64($arg0, $arg1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64($arg2, $arg3, t1) # a2/a3 <- vCC/vCC+1
- .if $chkzero
- or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- $preinstr # optional op
- $instr # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vAA/vAA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/binopWide2addr.S b/runtime/interpreter/mterp/mips/binopWide2addr.S
deleted file mode 100644
index f96fdb2..0000000
--- a/runtime/interpreter/mterp/mips/binopWide2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64($arg2, $arg3, a1) # a2/a3 <- vB/vB+1
- LOAD64($arg0, $arg1, t0) # a0/a1 <- vA/vA+1
- .if $chkzero
- or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- $preinstr # optional op
- $instr # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- $result0/$result1
diff --git a/runtime/interpreter/mterp/mips/const.S b/runtime/interpreter/mterp/mips/const.S
deleted file mode 100644
index 5d8379d..0000000
--- a/runtime/interpreter/mterp/mips/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL($helper) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/control_flow.S b/runtime/interpreter/mterp/mips/control_flow.S
new file mode 100644
index 0000000..88e1f0e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/control_flow.S
@@ -0,0 +1,214 @@
+%def bincmp(condition=""):
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform.
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a0, a0) # a0 <- vA
+ FETCH_S(rINST, 1) # rINST<- branch offset, in code units
+ b${condition} a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
+ li t0, JIT_CHECK_OSR
+ beq rPROFILE, t0, .L_check_not_taken_osr
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def zcmp(condition=""):
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform.
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a0, a0) # a0 <- vAA
+ FETCH_S(rINST, 1) # rINST <- branch offset, in code units
+ b${condition} a0, zero, MterpCommonTakenBranchNoFlags
+ li t0, JIT_CHECK_OSR # possible OSR re-entry?
+ beq rPROFILE, t0, .L_check_not_taken_osr
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
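Both helpers share the same branch shape, sketched here in Python with the labels kept as strings (illustrative only): a taken branch funnels into the common taken-branch path, and even a not-taken branch must notice a pending JIT_CHECK_OSR request in rPROFILE.

def if_cmp(vA, vB, taken, rPROFILE, JIT_CHECK_OSR):
    if taken(vA, vB):                      # b<condition> a0, a3, ...
        return "MterpCommonTakenBranchNoFlags"
    if rPROFILE == JIT_CHECK_OSR:          # li t0, JIT_CHECK_OSR; beq
        return ".L_check_not_taken_osr"
    return "FETCH_ADVANCE_INST(2)"         # fall through to the next insn
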
+%def op_goto():
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
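The sll/sra pair is just an 8-bit sign extension of the AA byte sitting in the upper half of the 16-bit code unit held in rINST. For example (illustrative):

def goto_offset(rINST):
    a0 = (rINST << 16) & 0xFFFFFFFF                            # sll: AAxx0000
    return (a0 - (1 << 32) if a0 & 0x80000000 else a0) >> 24   # sra by 24

print(goto_offset(0xFE28))   # AA = 0xFE -> -2 code units (branch back by two)
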
+%def op_goto_16():
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(rINST, 1) # rINST <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
+ b MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+% bincmp(condition="eq")
+
+%def op_if_eqz():
+% zcmp(condition="eq")
+
+%def op_if_ge():
+% bincmp(condition="ge")
+
+%def op_if_gez():
+% zcmp(condition="ge")
+
+%def op_if_gt():
+% bincmp(condition="gt")
+
+%def op_if_gtz():
+% zcmp(condition="gt")
+
+%def op_if_le():
+% bincmp(condition="le")
+
+%def op_if_lez():
+% zcmp(condition="le")
+
+%def op_if_lt():
+% bincmp(condition="lt")
+
+%def op_if_ltz():
+% zcmp(condition="lt")
+
+%def op_if_ne():
+% bincmp(condition="ne")
+
+%def op_if_nez():
+% zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL($func) # a0 <- code-unit branch offset
+ move rINST, v0
+ b MterpCommonTakenBranchNoFlags
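+    /*
+     * Sketch of the control flow, assuming the usual helper behaviour (the
+     * helpers are not shown in this file): MterpDoPackedSwitch and
+     * MterpDoSparseSwitch return the matched case's code-unit offset, or the
+     * width of the switch instruction itself when no case matches, so the
+     * unconditional "taken branch" above also covers the fall-through case.
+     */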
+
+%def op_return():
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(v0, a2) # v0 <- vAA
+ move v1, zero
+ b MterpReturn
+
+%def op_return_object():
+% op_return()
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
+
+%def op_return_void_no_barrier():
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ move v0, zero
+ move v1, zero
+ b MterpReturn
+
+%def op_return_wide():
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ JAL(MterpThreadFenceForConstructor)
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqz ra, 1f
+ JAL(MterpSuspendCheck) # (self)
+1:
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
+ b MterpReturn
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC() # exception handler can throw
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (exception object)
+ # null object?
+ beqz a1, common_errNullObject # yes, throw an NPE instead
+ sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S
deleted file mode 100644
index d342354..0000000
--- a/runtime/interpreter/mterp/mips/entry.S
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
- .global ExecuteMterpImpl
- .ent ExecuteMterpImpl
- .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
- .cfi_startproc
- .set noreorder
- .cpload t9
- .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
- STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
- .cprestore STACK_OFFSET_GP
-
- /* Remember the return register */
- sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
- EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
- lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
- EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
- EXPORT_PC()
-
- /* Starting ibase */
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
- /* Set up for backwards branches & osr profiling */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- JAL(MterpSetUpHotnessCountdown) # (method, shadow_frame, self)
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST() # load rINST from rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips/fallback.S b/runtime/interpreter/mterp/mips/fallback.S
deleted file mode 100644
index 82cbc63..0000000
--- a/runtime/interpreter/mterp/mips/fallback.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Transfer stub to alternate interpreter */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/fbinop.S b/runtime/interpreter/mterp/mips/fbinop.S
deleted file mode 100644
index 6c1468c..0000000
--- a/runtime/interpreter/mterp/mips/fbinop.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- $instr # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinop2addr.S b/runtime/interpreter/mterp/mips/fbinop2addr.S
deleted file mode 100644
index 2caaf9c..0000000
--- a/runtime/interpreter/mterp/mips/fbinop2addr.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- $instr
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/fbinopWide.S b/runtime/interpreter/mterp/mips/fbinopWide.S
deleted file mode 100644
index a1fe91e..0000000
--- a/runtime/interpreter/mterp/mips/fbinopWide.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- $instr
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/fbinopWide2addr.S b/runtime/interpreter/mterp/mips/fbinopWide2addr.S
deleted file mode 100644
index 7303441..0000000
--- a/runtime/interpreter/mterp/mips/fbinopWide2addr.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $instr
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/floating_point.S b/runtime/interpreter/mterp/mips/floating_point.S
new file mode 100644
index 0000000..20df51e
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/floating_point.S
@@ -0,0 +1,518 @@
+%def fbinop(instr=""):
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+    GET_VREG_F(fa1, a3)                    #  fa1 <- vCC
+    GET_VREG_F(fa0, a2)                    #  fa0 <- vBB
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $instr # f0 = result
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
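+    /*
+     * Example instantiation (illustrative): op_add_float below passes
+     * instr="add.s fv0, fa0, fa1", so $instr becomes a single-precision add;
+     * op_rem_float instead passes instr="JAL(fmodf)", which is why the
+     * template allows either a plain instruction or a function call.
+     */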
+
+%def fbinop2addr(instr=""):
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr"
+ * that specifies an instruction that performs "fv0 = fa0 op fa1".
+     * This could be a MIPS instruction or a function call.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $instr
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
+
+%def fbinopWide(instr=""):
+ /*
+ * Generic 64-bit floating-point binary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = fa0 op fa1".
+     * This could be a MIPS instruction or a function call.
+ *
+ * for: add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $instr
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
+
+%def fbinopWide2addr(instr=""):
+ /*
+ * Generic 64-bit floating-point "/2addr" binary operation.
+ * Provide an "instr" line that specifies an instruction that
+ * performs "fv0 = fa0 op fa1".
+     * This could be a MIPS instruction or a function call.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
+ LOAD64_F(fa0, fa0f, t0)
+ LOAD64_F(fa1, fa1f, a1)
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
+
+%def funop(instr=""):
+ /*
+ * Generic 32-bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
+
+%def funopWider(instr=""):
+ /*
+ * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
+ * line that specifies an instruction that performs "fv0 = op fa0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $instr
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
+
+%def op_add_double():
+% fbinopWide(instr="add.d fv0, fa0, fa1")
+
+%def op_add_double_2addr():
+% fbinopWide2addr(instr="add.d fv0, fa0, fa1")
+
+%def op_add_float():
+% fbinop(instr="add.s fv0, fa0, fa1")
+
+%def op_add_float_2addr():
+% fbinop2addr(instr="add.s fv0, fa0, fa1")
+
+%def op_cmpg_double():
+% op_cmpl_double(gt_bias="1")
+
+%def op_cmpg_float():
+% op_cmpl_float(gt_bias="1")
+
+%def op_cmpl_double(gt_bias="0"):
+ /*
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register based on the comparison results.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # rOBJ <- BB
+ srl t0, a0, 8 # t0 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
+ EAS2(t0, rFP, t0) # t0 <- &fp[CC]
+ LOAD64_F(ft0, ft0f, rOBJ)
+ LOAD64_F(ft1, ft1f, t0)
+#ifdef MIPS32REVGE6
+ cmp.eq.d ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.d ft2, ft0, ft1
+ li rTEMP, -1
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
+ c.eq.d fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ c.olt.d fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.d fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
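+    /*
+     * Note on gt_bias (summary of the code above): with gt_bias=0
+     * (cmpl-double) an unordered comparison, i.e. a NaN operand, stores -1,
+     * while op_cmpg_double reuses this template with gt_bias=1 so the
+     * unordered case stores +1 instead.
+     */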
+
+%def op_cmpl_float(gt_bias="0"):
+ /*
+ * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
+ * into the destination register based on the comparison results.
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+    srl     a3, a0, 8                      #  a3 <- CC
+ GET_VREG_F(ft0, a2)
+ GET_VREG_F(ft1, a3)
+#ifdef MIPS32REVGE6
+ cmp.eq.s ft2, ft0, ft1
+ li rTEMP, 0
+ bc1nez ft2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.s ft2, ft0, ft1
+ li rTEMP, -1
+ bc1nez ft2, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s ft2, ft1, ft0
+ li rTEMP, 1
+ bc1nez ft2, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#else
+ c.eq.s fcc0, ft0, ft1
+ li rTEMP, 0
+ bc1t fcc0, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ c.olt.s fcc0, ft0, ft1
+ li rTEMP, -1
+ bc1t fcc0, 1f # done if vBB < vCC (ordered)
+ li rTEMP, 1 # vBB > vCC or unordered
+ .else
+ c.olt.s fcc0, ft1, ft0
+ li rTEMP, 1
+ bc1t fcc0, 1f # done if vBB > vCC (ordered)
+ li rTEMP, -1 # vBB < vCC or unordered
+ .endif
+#endif
+1:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
+
+%def op_div_double():
+% fbinopWide(instr="div.d fv0, fa0, fa1")
+
+%def op_div_double_2addr():
+% fbinopWide2addr(instr="div.d fv0, fa0, fa1")
+
+%def op_div_float():
+% fbinop(instr="div.s fv0, fa0, fa1")
+
+%def op_div_float_2addr():
+% fbinop2addr(instr="div.s fv0, fa0, fa1")
+
+%def op_double_to_float():
+% unopNarrower(instr="cvt.s.d fv0, fa0")
+
+%def op_double_to_int():
+ /*
+ * double-to-int
+ *
+ * We have to clip values to int min/max per the specification. The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer. The EABI conversion function doesn't do this for
+     * us on pre-R6.
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifndef MIPS32REVGE6
+ li t0, INT_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+ c.ole.d fcc0, fa1, fa0
+#endif
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.d fcc0, fa0, fa0
+ mtc1 zero, fa0
+ MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
+ movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
+1:
+#endif
+ trunc.w.d fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
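+    /*
+     * Summary of the pre-R6 guard above (illustrative): a NaN input is
+     * replaced with 0.0 and anything below INT_MIN is replaced with
+     * INT_MIN_AS_DOUBLE before trunc.w.d runs, giving the clamping the
+     * specification asks for; on R6 the bare trunc.w.d is assumed to produce
+     * the required result on its own.
+     */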
+
+%def op_double_to_long():
+ /*
+ * double-to-long
+ *
+ * We have to clip values to long min/max per the specification. The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer. The EABI conversion function doesn't do this for
+     * us on pre-R6.
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64_F(fa0, fa0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ trunc.l.d fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
+#else
+ c.eq.d fcc0, fa0, fa0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1f fcc0, .L${opcode}_get_opcode
+
+ li t0, LONG_MIN_AS_DOUBLE_HIGH
+ mtc1 zero, fa1
+ MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
+ c.ole.d fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ neg.d fa1, fa1
+ c.ole.d fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ JAL(__fixdfdi)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ b .L${opcode}_set_vreg
+#endif
+%def op_double_to_long_helper_code():
+
+#ifndef MIPS32REVGE6
+.Lop_double_to_long_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.Lop_double_to_long_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
+#endif
+
+%def op_float_to_double():
+% funopWider(instr="cvt.d.s fv0, fa0")
+
+%def op_float_to_int():
+ /*
+ * float-to-int
+ *
+ * We have to clip values to int min/max per the specification. The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer. The EABI conversion function doesn't do this for
+     * us on pre-R6.
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+#ifndef MIPS32REVGE6
+ li t0, INT_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ c.ole.s fcc0, fa1, fa0
+#endif
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+#ifndef MIPS32REVGE6
+ bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
+ c.eq.s fcc0, fa0, fa0
+ mtc1 zero, fa0
+ movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
+1:
+#endif
+ trunc.w.s fa0, fa0
+ SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
+
+%def op_float_to_long():
+ /*
+ * float-to-long
+ *
+ * We have to clip values to long min/max per the specification. The
+     * expected common case is a "reasonable" value that converts directly
+     * to a modest integer. The EABI conversion function doesn't do this for
+     * us on pre-R6.
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG_F(fa0, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+#ifdef MIPS32REVGE6
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ trunc.l.s fa0, fa0
+ SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
+#else
+ c.eq.s fcc0, fa0, fa0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1f fcc0, .L${opcode}_get_opcode
+
+ li t0, LONG_MIN_AS_FLOAT
+ mtc1 t0, fa1
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT1, LONG_MIN_HIGH
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ neg.s fa1, fa1
+ c.ole.s fcc0, fa1, fa0
+ nor rRESULT0, rRESULT0, zero
+ nor rRESULT1, rRESULT1, zero
+ bc1t fcc0, .L${opcode}_get_opcode
+
+ JAL(__fixsfdi)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ b .L${opcode}_set_vreg
+#endif
+%def op_float_to_long_helper_code():
+
+#ifndef MIPS32REVGE6
+.Lop_float_to_long_get_opcode:
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+.Lop_float_to_long_set_vreg:
+ SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
+#endif
+
+%def op_int_to_double():
+% funopWider(instr="cvt.d.w fv0, fa0")
+
+%def op_int_to_float():
+% funop(instr="cvt.s.w fv0, fa0")
+
+%def op_long_to_double():
+ /*
+ * long-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.d.l fv0, fv0
+#else
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+#endif
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
+
+%def op_long_to_float():
+ /*
+ * long-to-float
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+
+#ifdef MIPS32REVGE6
+ LOAD64_F(fv0, fv0f, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ cvt.s.l fv0, fv0
+#else
+ LOAD64(rARG0, rARG1, a3)
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(__floatdisf)
+#endif
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
+
+%def op_mul_double():
+% fbinopWide(instr="mul.d fv0, fa0, fa1")
+
+%def op_mul_double_2addr():
+% fbinopWide2addr(instr="mul.d fv0, fa0, fa1")
+
+%def op_mul_float():
+% fbinop(instr="mul.s fv0, fa0, fa1")
+
+%def op_mul_float_2addr():
+% fbinop2addr(instr="mul.s fv0, fa0, fa1")
+
+%def op_neg_double():
+% unopWide(instr="addu a1, a1, 0x80000000")
+
+%def op_neg_float():
+% unop(instr="addu a0, a0, 0x80000000")
+
+%def op_rem_double():
+% fbinopWide(instr="JAL(fmod)")
+
+%def op_rem_double_2addr():
+% fbinopWide2addr(instr="JAL(fmod)")
+
+%def op_rem_float():
+% fbinop(instr="JAL(fmodf)")
+
+%def op_rem_float_2addr():
+% fbinop2addr(instr="JAL(fmodf)")
+
+%def op_sub_double():
+% fbinopWide(instr="sub.d fv0, fa0, fa1")
+
+%def op_sub_double_2addr():
+% fbinopWide2addr(instr="sub.d fv0, fa0, fa1")
+
+%def op_sub_float():
+% fbinop(instr="sub.s fv0, fa0, fa1")
+
+%def op_sub_float_2addr():
+% fbinop2addr(instr="sub.s fv0, fa0, fa1")
diff --git a/runtime/interpreter/mterp/mips/footer.S b/runtime/interpreter/mterp/mips/footer.S
deleted file mode 100644
index 1c784ef..0000000
--- a/runtime/interpreter/mterp/mips/footer.S
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogDivideByZeroException)
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogArrayIndexException)
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNegativeArraySizeException)
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNoSuchMethodException)
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNullObjectException)
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogExceptionThrownException)
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- lw a2, THREAD_FLAGS_OFFSET(rSELF)
- JAL(MterpLogSuspendFallback)
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqz a0, MterpFallback # If exception, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpHandleException) # (self, shadow_frame)
- beqz v0, MterpExceptionReturn # no local catch, back to caller.
- lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lw a1, OFF_FP_DEX_PC(rFP)
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
- EAS1(rPC, a0, a1) # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC()
- FETCH_INST()
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
- bgtz rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_osr_check
- blt rPROFILE, t0, .L_resume_backward_branch
- subu rPROFILE, 1
- beqz rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE()
- addu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnez ra, .L_suspend_request_pending
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC()
- move a0, rSELF
- JAL(MterpSuspendCheck) # (self)
- bnez v0, MterpFallback
- REFRESH_IBASE() # might have changed during suspend
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_no_count_backwards:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- bne rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- beq rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
- add a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- addu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- lw a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- JAL(MterpLogOSR)
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- move v0, zero # signal retry with reference interpreter.
- b MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-MterpReturn:
- lw a2, OFF_FP_RESULT_REGISTER(rFP)
- sw v0, 0(a2)
- sw v1, 4(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
- STACK_LOAD_FULL()
- jalr zero, ra
-
- .cfi_endproc
- .end ExecuteMterpImpl
diff --git a/runtime/interpreter/mterp/mips/funop.S b/runtime/interpreter/mterp/mips/funop.S
deleted file mode 100644
index b2b22c9..0000000
--- a/runtime/interpreter/mterp/mips/funop.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /*
- * Generic 32-bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $instr
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/funopWider.S b/runtime/interpreter/mterp/mips/funopWider.S
deleted file mode 100644
index 6862e24..0000000
--- a/runtime/interpreter/mterp/mips/funopWider.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $instr
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
diff --git a/runtime/interpreter/mterp/mips/header.S b/runtime/interpreter/mterp/mips/header.S
deleted file mode 100644
index bef9eeb..0000000
--- a/runtime/interpreter/mterp/mips/header.S
+++ /dev/null
@@ -1,727 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2 /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64 /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6 /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rIBASE interpreted instruction base pointer, used for computed goto
- s4 rINST first 16-bit code unit of current instruction
- s5 rOBJ object pointer
- s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s7 rTEMP used as temp storage that can survive a function call
- s8 rPROFILE branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero $$0 /* always zero */
-#define AT $$at /* assembler temp */
-#define v0 $$2 /* return value */
-#define v1 $$3
-#define a0 $$4 /* argument registers */
-#define a1 $$5
-#define a2 $$6
-#define a3 $$7
-#define t0 $$8 /* temp registers (not saved across subroutine calls) */
-#define t1 $$9
-#define t2 $$10
-#define t3 $$11
-#define t4 $$12
-#define t5 $$13
-#define t6 $$14
-#define t7 $$15
-#define ta0 $$12 /* alias */
-#define ta1 $$13
-#define ta2 $$14
-#define ta3 $$15
-#define s0 $$16 /* saved across subroutine calls (callee saved) */
-#define s1 $$17
-#define s2 $$18
-#define s3 $$19
-#define s4 $$20
-#define s5 $$21
-#define s6 $$22
-#define s7 $$23
-#define t8 $$24 /* two more temp registers */
-#define t9 $$25
-#define k0 $$26 /* kernel temporary */
-#define k1 $$27
-#define gp $$28 /* global pointer */
-#define sp $$29 /* stack pointer */
-#define s8 $$30 /* one more callee saved */
-#define ra $$31 /* return address */
-
-/* FP register definitions */
-#define fv0 $$f0
-#define fv0f $$f1
-#define fv1 $$f2
-#define fv1f $$f3
-#define fa0 $$f12
-#define fa0f $$f13
-#define fa1 $$f14
-#define fa1f $$f15
-#define ft0 $$f4
-#define ft0f $$f5
-#define ft1 $$f6
-#define ft1f $$f7
-#define ft2 $$f8
-#define ft2f $$f9
-#define ft3 $$f10
-#define ft3f $$f11
-#define ft4 $$f16
-#define ft4f $$f17
-#define ft5 $$f18
-#define ft5f $$f19
-#define fs0 $$f20
-#define fs0f $$f21
-#define fs1 $$f22
-#define fs1f $$f23
-#define fs2 $$f24
-#define fs2f $$f25
-#define fs3 $$f26
-#define fs3f $$f27
-#define fs4 $$f28
-#define fs4f $$f29
-#define fs5 $$f30
-#define fs5f $$f31
-
-#ifndef MIPS32REVGE6
-#define fcc0 $$fcc0
-#define fcc1 $$fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
- seb rd, rt
-#define SEH(rd, rt) \
- seh rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- ins rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
- sll rd, rt, 24; \
- sra rd, rd, 24
-#define SEH(rd, rt) \
- sll rd, rt, 16; \
- sra rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- sll rt_hi, rt_hi, 16; \
- or rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mthc1 r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mtc1 r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
- jic rt, 0
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- lsa rd, rs, rt, sa; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#else
-#define JR(rt) \
- jalr zero, rt
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- .set push; \
- .set noat; \
- sll AT, rs, sa; \
- addu rd, AT, rt; \
- .set pop; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
- subu tmp, rPC, tmp; \
- sra tmp, tmp, 1; \
- sw tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
- lhu rINST, ((_count)*2)(rPC); \
- addu rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd. Updates
- * rPC to point to the next instruction. "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
- addu rPC, rPC, rd; \
- lhu rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
- sll rd, rd, ${handler_size_bits}; \
- addu rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
- GET_OPCODE_TARGET(rd); \
- JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
- .set noat; \
- EAS2(AT, rFP, rix); \
- l.s rd, (AT); \
- .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- s.s rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- jalr zero, dst; \
- s.s rlo, 0(t8); \
- .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- lw rd, 0(AT); \
- .set at
-
-#define STORE_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- sw rd, 0(AT); \
- .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
- sw rlo, off(rbase); \
- sw rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
- lw rlo, off(rbase); \
- lw rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- .set noat; \
- mfhc1 AT, rlo; \
- sw AT, (off+4)(rbase); \
- .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- .set noat; \
- lw AT, (off+4)(rbase); \
- mthc1 AT, rlo; \
- .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- s.s rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- l.s rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP 84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
- STACK_STORE(ra, 124); \
- STACK_STORE(s8, 120); \
- STACK_STORE(s0, 116); \
- STACK_STORE(s1, 112); \
- STACK_STORE(s2, 108); \
- STACK_STORE(s3, 104); \
- STACK_STORE(s4, 100); \
- STACK_STORE(s5, 96); \
- STACK_STORE(s6, 92); \
- STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
- STACK_LOAD(s7, 88); \
- STACK_LOAD(s6, 92); \
- STACK_LOAD(s5, 96); \
- STACK_LOAD(s4, 100); \
- STACK_LOAD(s3, 104); \
- STACK_LOAD(s2, 108); \
- STACK_LOAD(s1, 112); \
- STACK_LOAD(s0, 116); \
- STACK_LOAD(s8, 120); \
- STACK_LOAD(ra, 124); \
- DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
-#define LONG_MIN_HIGH 0x80000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
diff --git a/runtime/interpreter/mterp/mips/instruction_end.S b/runtime/interpreter/mterp/mips/instruction_end.S
deleted file mode 100644
index 32c725c..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_end_alt.S b/runtime/interpreter/mterp/mips/instruction_end_alt.S
deleted file mode 100644
index f90916f..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end_alt.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_end_sister.S b/runtime/interpreter/mterp/mips/instruction_end_sister.S
deleted file mode 100644
index c5f4886..0000000
--- a/runtime/interpreter/mterp/mips/instruction_end_sister.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/mips/instruction_start.S b/runtime/interpreter/mterp/mips/instruction_start.S
deleted file mode 100644
index 8874c20..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/mips/instruction_start_alt.S b/runtime/interpreter/mterp/mips/instruction_start_alt.S
deleted file mode 100644
index 0c9ffdb..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start_alt.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
diff --git a/runtime/interpreter/mterp/mips/instruction_start_sister.S b/runtime/interpreter/mterp/mips/instruction_start_sister.S
deleted file mode 100644
index 2ec51f7..0000000
--- a/runtime/interpreter/mterp/mips/instruction_start_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
index db3b8af..c77d12b 100644
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ b/runtime/interpreter/mterp/mips/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -17,3 +17,71 @@
bnez v0, MterpFallback
GET_INST_OPCODE(t0)
GOTO_OPCODE(t0)
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC()
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ JAL($helper)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(4)
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
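+    /*
+     * Illustrative note: the advance here is 4 because the
+     * invoke-polymorphic encodings (formats 45cc/4rcc) occupy four 16-bit
+     * code units, one more than the regular invoke formats handled by the
+     * invoke() wrapper above.
+     */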
+
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips/invoke_polymorphic.S b/runtime/interpreter/mterp/mips/invoke_polymorphic.S
deleted file mode 100644
index 5c963f0..0000000
--- a/runtime/interpreter/mterp/mips/invoke_polymorphic.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL($helper)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(4)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
diff --git a/runtime/interpreter/mterp/mips/main.S b/runtime/interpreter/mterp/mips/main.S
new file mode 100644
index 0000000..3ebd3d7
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/main.S
@@ -0,0 +1,1151 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing an ExecuteXXXImpl()-style body (doesn't
+ handle invoke; allows higher-level code to create the frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate the shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32REVGE2 /* mips32r2 and greater */
+#if (__mips==32) && (__mips_isa_rev>=5)
+#define FPU64 /* 64 bit FPU */
+#if (__mips==32) && (__mips_isa_rev>=6)
+#define MIPS32REVGE6 /* mips32r6 and greater */
+#endif
+#endif
+#endif
+
+/* MIPS definitions and declarations
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rIBASE interpreted instruction base pointer, used for computed goto
+ s4 rINST first 16-bit code unit of current instruction
+ s5 rOBJ object pointer
+ s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+ s7 rTEMP used as temp storage that can survive a function call
+ s8 rPROFILE branch profiling countdown
+
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
+#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rREFS s6
+#define rTEMP s7
+#define rPROFILE s8
+
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+
+/* GP register definitions */
+#define zero $$0 /* always zero */
+#define AT $$at /* assembler temp */
+#define v0 $$2 /* return value */
+#define v1 $$3
+#define a0 $$4 /* argument registers */
+#define a1 $$5
+#define a2 $$6
+#define a3 $$7
+#define t0 $$8 /* temp registers (not saved across subroutine calls) */
+#define t1 $$9
+#define t2 $$10
+#define t3 $$11
+#define t4 $$12
+#define t5 $$13
+#define t6 $$14
+#define t7 $$15
+#define ta0 $$12 /* alias */
+#define ta1 $$13
+#define ta2 $$14
+#define ta3 $$15
+#define s0 $$16 /* saved across subroutine calls (callee saved) */
+#define s1 $$17
+#define s2 $$18
+#define s3 $$19
+#define s4 $$20
+#define s5 $$21
+#define s6 $$22
+#define s7 $$23
+#define t8 $$24 /* two more temp registers */
+#define t9 $$25
+#define k0 $$26 /* kernel temporary */
+#define k1 $$27
+#define gp $$28 /* global pointer */
+#define sp $$29 /* stack pointer */
+#define s8 $$30 /* one more callee saved */
+#define ra $$31 /* return address */
+
+/* FP register definitions */
+#define fv0 $$f0
+#define fv0f $$f1
+#define fv1 $$f2
+#define fv1f $$f3
+#define fa0 $$f12
+#define fa0f $$f13
+#define fa1 $$f14
+#define fa1f $$f15
+#define ft0 $$f4
+#define ft0f $$f5
+#define ft1 $$f6
+#define ft1f $$f7
+#define ft2 $$f8
+#define ft2f $$f9
+#define ft3 $$f10
+#define ft3f $$f11
+#define ft4 $$f16
+#define ft4f $$f17
+#define ft5 $$f18
+#define ft5f $$f19
+#define fs0 $$f20
+#define fs0f $$f21
+#define fs1 $$f22
+#define fs1f $$f23
+#define fs2 $$f24
+#define fs2f $$f25
+#define fs3 $$f26
+#define fs3f $$f27
+#define fs4 $$f28
+#define fs4f $$f29
+#define fs5 $$f30
+#define fs5f $$f31
+
+#ifndef MIPS32REVGE6
+#define fcc0 $$fcc0
+#define fcc1 $$fcc1
+#endif
+
+#ifdef MIPS32REVGE2
+#define SEB(rd, rt) \
+ seb rd, rt
+#define SEH(rd, rt) \
+ seh rd, rt
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ ins rd_lo, rt_hi, 16, 16
+#else
+#define SEB(rd, rt) \
+ sll rd, rt, 24; \
+ sra rd, rd, 24
+#define SEH(rd, rt) \
+ sll rd, rt, 16; \
+ sra rd, rd, 16
+/* Clobbers rt_hi on pre-R2. */
+#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
+ sll rt_hi, rt_hi, 16; \
+ or rd_lo, rt_hi
+#endif
+
+#ifdef FPU64
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mthc1 r, flo
+#else
+#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
+ mtc1 r, fhi
+#endif
+
+#ifdef MIPS32REVGE6
+#define JR(rt) \
+ jic rt, 0
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ lsa rd, rs, rt, sa; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#else
+#define JR(rt) \
+ jalr zero, rt
+#define LSA(rd, rs, rt, sa) \
+ .if sa; \
+ .set push; \
+ .set noat; \
+ sll AT, rs, sa; \
+ addu rd, AT, rt; \
+ .set pop; \
+ .else; \
+ addu rd, rs, rt; \
+ .endif
+#endif
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
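+
+/*
+ * Illustrative usage (a sketch, not generated code): since rFP points at vregs[0],
+ * the enclosing shadow frame's fields are reached with negative offsets, e.g.
+ *     lw a0, OFF_FP_METHOD(rFP)        # a0 <- shadow_frame->method_
+ * as done in the entry and hotness-reporting sequences below.
+ */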
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to the dex_pc field in the shadow frame, for the benefit of future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+#define EXPORT_PC() \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP)
+
+#define EXPORT_DEX_PC(tmp) \
+ lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
+ sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
+ subu tmp, rPC, tmp; \
+ sra tmp, tmp, 1; \
+ sw tmp, OFF_FP_DEX_PC(rFP)
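+
+/*
+ * Sketch of the arithmetic above: EXPORT_DEX_PC stores the index
+ * (rPC - dex_instructions) >> 1, i.e. the byte distance from the start of the
+ * code item divided by two, because each code unit is 16 bits wide.
+ */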
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() lhu rINST, (rPC)
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) \
+ lhu rINST, ((_count)*2)(rPC); \
+ addu rPC, rPC, ((_count) * 2)
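+
+/*
+ * Usage sketch: a handler for a two-code-unit instruction typically ends with
+ *     FETCH_ADVANCE_INST(2)            # step past opcode + operand unit, load rINST
+ *     GET_INST_OPCODE(t0)              # extract opcode from rINST
+ *     GOTO_OPCODE(t0)                  # jump to next instruction
+ * (see, e.g., the quickened iget/iput handlers).
+ */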
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
+
+/* Advance rPC by some number of code units. */
+#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
+
+/*
+ * Fetch the next instruction from an offset specified by rd. Updates
+ * rPC to point to the next instruction. "rd" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ */
+#define FETCH_ADVANCE_INST_RB(rd) \
+ addu rPC, rPC, rd; \
+ lhu rINST, (rPC)
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
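+
+/*
+ * Usage sketch: for an "op vAA, vBB, vCC" format, the two operand bytes of
+ * code unit 1 can be fetched separately:
+ *     FETCH_B(a2, 1, 0)                # a2 <- BB (low byte)
+ *     FETCH_B(a3, 1, 1)                # a3 <- CC (high byte)
+ */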
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Transform opcode into branch target address.
+ */
+#define GET_OPCODE_TARGET(rd) \
+ sll rd, rd, ${handler_size_bits}; \
+ addu rd, rIBASE, rd
+
+/*
+ * Begin executing the opcode in rd.
+ */
+#define GOTO_OPCODE(rd) \
+ GET_OPCODE_TARGET(rd); \
+ JR(rd)
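+
+/*
+ * Dispatch sketch: handlers are laid out as fixed-size slots starting at rIBASE,
+ * so "goto opcode" is simply rIBASE + (opcode << handler_size_bits); the alt
+ * stubs below assume the usual 128-byte slot size.
+ */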
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) \
+ .set noat; \
+ EAS2(AT, rFP, rix); \
+ l.s rd, (AT); \
+ .set at
+
+#ifdef MIPS32REVGE6
+#define SET_VREG(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw rd, 0(t8)
+#else
+#define SET_VREG_OBJECT(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw rd, 0(t8)
+#endif
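+
+/*
+ * Note (sketch): SET_VREG clears the matching rREFS slot while SET_VREG_OBJECT
+ * mirrors the value into it, implementing the double-storing of object
+ * references described in the notes at the top of this file.
+ */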
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#else
+#define SET_VREG64(rlo, rhi, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG_F(rd, rix) \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8)
+#else
+#define SET_VREG_F(rd, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8)
+#endif
+
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F(rlo, rhi, rix) \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#elif defined(FPU64)
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ s.s rlo, 0(t8)
+#else
+#define SET_VREG64_F(rlo, rhi, rix) \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8)
+#endif
+
+/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw rd, 0(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#else
+#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ sw rlo, 0(t8); \
+ sw rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ s.s rd, 0(t8); \
+ lsa t8, rix, rREFS, 2; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG_F_GOTO(rd, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rd, 0(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ jalr zero, dst; \
+ sw zero, 0(t8); \
+ .set reorder
+#endif
+
+/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
+#ifdef MIPS32REVGE6
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ lsa t8, rix, rFP, 2; \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ s.s rlo, 0(t8); \
+ sw AT, 4(t8); \
+ .set at; \
+ lsa t8, rix, rREFS, 2; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#elif defined(FPU64)
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rREFS, AT; \
+ sw zero, 0(t8); \
+ sw zero, 4(t8); \
+ addu t8, rFP, AT; \
+ mfhc1 AT, rlo; \
+ sw AT, 4(t8); \
+ .set at; \
+ jalr zero, dst; \
+ s.s rlo, 0(t8); \
+ .set reorder
+#else
+#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
+ .set noreorder; \
+ GET_OPCODE_TARGET(dst); \
+ .set noat; \
+ sll AT, rix, 2; \
+ addu t8, rFP, AT; \
+ s.s rlo, 0(t8); \
+ s.s rhi, 4(t8); \
+ addu t8, rREFS, AT; \
+ .set at; \
+ sw zero, 0(t8); \
+ jalr zero, dst; \
+ sw zero, 4(t8); \
+ .set reorder
+#endif
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifdef MIPS32REVGE2
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#else
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+/*
+ * Form an Effective Address rd = rbase + roff<<shift;
+ * Uses reg AT on pre-R6.
+ */
+#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
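+
+/*
+ * Example (sketch): EAS2(rREFS, rFP, a0), with a0 holding number_of_vregs,
+ * computes rFP + a0*4, i.e. the start of the reference array that follows the
+ * vregs; this is exactly how rREFS is initialized in ExecuteMterpImpl below.
+ */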
+
+#define LOAD_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ lw rd, 0(AT); \
+ .set at
+
+#define STORE_eas2(rd, rbase, roff) \
+ .set noat; \
+ EAS2(AT, rbase, roff); \
+ sw rd, 0(AT); \
+ .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#define STORE64_off(rlo, rhi, rbase, off) \
+ sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) \
+ lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#ifdef FPU64
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
+ .set noat; \
+ mfhc1 AT, rlo; \
+ sw AT, (off+4)(rbase); \
+ .set at
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
+ .set noat; \
+ lw AT, (off+4)(rbase); \
+ mthc1 AT, rlo; \
+ .set at
+#else
+#define STORE64_off_F(rlo, rhi, rbase, off) \
+ s.s rlo, off(rbase); \
+ s.s rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) \
+ l.s rlo, off(rbase); \
+ l.s rhi, (off+4)(rbase)
+#endif
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_GP 84
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+/*
+ * FP register usage restrictions:
+ * 1) We don't use the callee save FP registers so we don't have to save them.
+ * 2) We don't use the odd FP registers so we can share code with mips32r6.
+ */
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(ra, 124); \
+ STACK_STORE(s8, 120); \
+ STACK_STORE(s0, 116); \
+ STACK_STORE(s1, 112); \
+ STACK_STORE(s2, 108); \
+ STACK_STORE(s3, 104); \
+ STACK_STORE(s4, 100); \
+ STACK_STORE(s5, 96); \
+ STACK_STORE(s6, 92); \
+ STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+ STACK_LOAD(s7, 88); \
+ STACK_LOAD(s6, 92); \
+ STACK_LOAD(s5, 96); \
+ STACK_LOAD(s4, 100); \
+ STACK_LOAD(s3, 104); \
+ STACK_LOAD(s2, 108); \
+ STACK_LOAD(s1, 112); \
+ STACK_LOAD(s0, 116); \
+ STACK_LOAD(s8, 120); \
+ STACK_LOAD(ra, 124); \
+ DELETE_STACK(STACK_SIZE)
+
+#define REFRESH_IBASE() \
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
+#define LONG_MIN_HIGH 0x80000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+ .text
+ .align 2
+ .global ExecuteMterpImpl
+ .ent ExecuteMterpImpl
+ .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 dex_instructions
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+
+ExecuteMterpImpl:
+ .cfi_startproc
+ .set noreorder
+ .cpload t9
+ .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+ STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+ .cprestore STACK_OFFSET_GP
+
+ /* Remember the return register */
+ sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the dex instruction pointer */
+ sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
+ EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
+ lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
+ EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+
+ EXPORT_PC()
+
+ /* Starting ibase */
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+
+ /* Set up for backwards branches & osr profiling */
+ lw a0, OFF_FP_METHOD(rFP)
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rSELF
+ JAL(MterpSetUpHotnessCountdown) # (method, shadow_frame, self)
+ move rPROFILE, v0 # Starting hotness countdown to rPROFILE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* NOTE: no fallthrough */
+
+%def alt_stub():
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ la ra, artMterpAsmInstructionStart + (${opnum} * 128) # Addr of primary handler
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
+ move a0, rSELF # arg0
+ addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
+ move a2, rPC
+ la t9, MterpCheckBefore
+ jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+%def helpers():
+% op_float_to_long_helper_code()
+% op_double_to_long_helper_code()
+% op_mul_long_helper_code()
+% op_shl_long_helper_code()
+% op_shr_long_helper_code()
+% op_ushr_long_helper_code()
+% op_shl_long_2addr_helper_code()
+% op_shr_long_2addr_helper_code()
+% op_ushr_long_2addr_helper_code()
+
+%def footer():
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogDivideByZeroException)
+#endif
+ b MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogArrayIndexException)
+#endif
+ b MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNegativeArraySizeException)
+#endif
+ b MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNoSuchMethodException)
+#endif
+ b MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogNullObjectException)
+#endif
+ b MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogExceptionThrownException)
+#endif
+ b MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ lw a2, THREAD_FLAGS_OFFSET(rSELF)
+ JAL(MterpLogSuspendFallback)
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqz a0, MterpFallback # If exception, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here, or do we need to bail out to the caller?
+ *
+ */
+MterpException:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpHandleException) # (self, shadow_frame)
+ beqz v0, MterpExceptionReturn # no local catch, back to caller.
+ lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
+ lw a1, OFF_FP_DEX_PC(rFP)
+ lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+ EAS1(rPC, a0, a1) # generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ JAL(MterpShouldSwitchInterpreters)
+ bnez v0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC()
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+ /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * rINST <= signed offset
+ * rPROFILE <= signed hotness countdown (expanded to 32 bits)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ */
+MterpCommonTakenBranchNoFlags:
+ bgtz rINST, .L_forward_branch # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+# error "JIT_CHECK_OSR must be -1."
+#endif
+ li t0, JIT_CHECK_OSR
+ beq rPROFILE, t0, .L_osr_check
+ blt rPROFILE, t0, .L_resume_backward_branch
+ subu rPROFILE, 1
+ beqz rPROFILE, .L_add_batch # counted down to zero - report
+.L_resume_backward_branch:
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ REFRESH_IBASE()
+ addu a2, rINST, rINST # a2<- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ bnez ra, .L_suspend_request_pending
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+.L_suspend_request_pending:
+ EXPORT_PC()
+ move a0, rSELF
+ JAL(MterpSuspendCheck) # (self)
+ bnez v0, MterpFallback
+ REFRESH_IBASE() # might have changed during suspend
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+.L_no_count_backwards:
+ li t0, JIT_CHECK_OSR # check for possible OSR re-entry
+ bne rPROFILE, t0, .L_resume_backward_branch
+.L_osr_check:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ EXPORT_PC()
+ JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement
+ b .L_resume_backward_branch
+
+.L_forward_branch:
+ li t0, JIT_CHECK_OSR # check for possible OSR re-entry
+ beq rPROFILE, t0, .L_check_osr_forward
+.L_resume_forward_branch:
+ add a2, rINST, rINST # a2<- byte offset
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+.L_check_osr_forward:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ EXPORT_PC()
+ JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement
+ b .L_resume_forward_branch
+
+.L_add_batch:
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+ lw a0, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ move rPROFILE, v0 # restore new hotness countdown to rPROFILE
+ b .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when an OSR check request is active on
+ * the not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ li a2, 2
+ EXPORT_PC()
+ JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ bnez v0, MterpOnStackReplacement
+ FETCH_ADVANCE_INST(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ JAL(MterpLogOSR)
+#endif
+ li v0, 1 # Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC()
+#if MTERP_LOGGING
+ move a0, rSELF
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ JAL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ move v0, zero # signal retry with reference interpreter.
+ b MterpDone
+/*
+ * We pushed some registers (including ra) on the stack in ExecuteMterpImpl.
+ * Here we restore the registers from the stack and return through ra.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+MterpReturn:
+ lw a2, OFF_FP_RESULT_REGISTER(rFP)
+ sw v0, 0(a2)
+ sw v1, 4(a2)
+ li v0, 1 # signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
+
+MterpProfileActive:
+ move rINST, v0 # stash return value
+ /* Report cached hotness counts */
+ lw a0, OFF_FP_METHOD(rFP)
+ addu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rSELF
+ sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+ JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ move v0, rINST # restore return value
+
+.L_pop_and_return:
+/* Restore from the stack and return. Frame size = STACK_SIZE */
+ STACK_LOAD_FULL()
+ jalr zero, ra
+
+ .cfi_endproc
+ .end ExecuteMterpImpl
+
+%def instruction_end():
+
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_end_alt():
+
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+
+%def instruction_start():
+
+ .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+%def instruction_start_alt():
+
+ .global artMterpAsmAltInstructionStart
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+ .text
+
+%def opcode_start():
+% pass
+%def opcode_end():
+% pass
diff --git a/runtime/interpreter/mterp/mips/object.S b/runtime/interpreter/mterp/mips/object.S
new file mode 100644
index 0000000..a987789
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/object.S
@@ -0,0 +1,257 @@
+%def field(helper=""):
+TODO
+
+%def op_check_cast():
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
+ PREFETCH_INST(2)
+ bnez v0, MterpPossibleException
+ ADVANCE(2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+% field(helper=helper)
+
+%def op_iget_boolean():
+% op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="lbu")
+
+%def op_iget_byte():
+% op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="lb")
+
+%def op_iget_char():
+% op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="lhu")
+
+%def op_iget_object():
+% op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ FETCH(a1, 1) # a1 <- field byte offset
+ EXPORT_PC()
+ GET_VREG(a0, a2) # a0 <- object we're operating on
+ JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ GET_OPA4(a2) # a2<- A+
+ PREFETCH_INST(2) # load rINST
+ bnez a3, MterpPossibleException # bail out
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
+
+%def op_iget_quick(load="lw"):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1
+ $load a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
+
+%def op_iget_short():
+% op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="lh")
+
+%def op_iget_wide():
+% op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 # t0 <- a3 + a1
+ LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
+
+%def op_instance_of():
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- CCCC
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &object
+ lw a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
+ lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ PREFETCH_INST(2) # load rINST
+ bnez a1, MterpException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(store="sb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(store="sb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(store="sh")
+
+%def op_iput_object():
+% op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ /* For: iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ JAL(MterpIputObjectQuick)
+ beqz v0, MterpException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_iput_quick(store="sw"):
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset@CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GET_OPCODE_TARGET(t1)
+ $store a0, 0(t0) # obj.field (8/16/32 bits) <- a0
+ JR(t1) # jump to next instruction
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(store="sh")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ GET_OPA4(a0) # a0 <- A(+)
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
+ # check object for null
+ beqz a2, common_errNullObject # object was null
+ EAS2(a3, rFP, a0) # a3 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
+ FETCH(a3, 1) # a3 <- field byte offset
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu a2, a2, a3 # a2 <- &obj.field (64 bits, aligned)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ JR(t0) # jump to next instruction
+
+%def op_new_instance():
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC()
+ addu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ JAL(MterpNewInstance)
+ beqz v0, MterpPossibleException
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips/op_add_double.S b/runtime/interpreter/mterp/mips/op_add_double.S
deleted file mode 100644
index 12ef0cf..0000000
--- a/runtime/interpreter/mterp/mips/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_double_2addr.S b/runtime/interpreter/mterp/mips/op_add_double_2addr.S
deleted file mode 100644
index c57add5..0000000
--- a/runtime/interpreter/mterp/mips/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"add.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float.S b/runtime/interpreter/mterp/mips/op_add_float.S
deleted file mode 100644
index 6a46cf0..0000000
--- a/runtime/interpreter/mterp/mips/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_float_2addr.S b/runtime/interpreter/mterp/mips/op_add_float_2addr.S
deleted file mode 100644
index 6ab5cc1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"add.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int.S b/runtime/interpreter/mterp/mips/op_add_int.S
deleted file mode 100644
index 53a0cb1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_2addr.S b/runtime/interpreter/mterp/mips/op_add_int_2addr.S
deleted file mode 100644
index ddd9214..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit16.S b/runtime/interpreter/mterp/mips/op_add_int_lit16.S
deleted file mode 100644
index 05535c1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_int_lit8.S b/runtime/interpreter/mterp/mips/op_add_int_lit8.S
deleted file mode 100644
index fd021b3..0000000
--- a/runtime/interpreter/mterp/mips/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_add_long.S b/runtime/interpreter/mterp/mips/op_add_long.S
deleted file mode 100644
index faacc6a..0000000
--- a/runtime/interpreter/mterp/mips/op_add_long.S
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * The compiler generates the following sequence for
- * [v1 v0] = [a1 a0] + [a3 a2];
- * addu v0,a2,a0
- * addu a1,a3,a1
- * sltu v1,v0,a2
- * addu v1,v1,a1
- */
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_add_long_2addr.S b/runtime/interpreter/mterp/mips/op_add_long_2addr.S
deleted file mode 100644
index bf827c1..0000000
--- a/runtime/interpreter/mterp/mips/op_add_long_2addr.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * See op_add_long.S for details
- */
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/runtime/interpreter/mterp/mips/op_aget.S b/runtime/interpreter/mterp/mips/op_aget.S
deleted file mode 100644
index e88402c..0000000
--- a/runtime/interpreter/mterp/mips/op_aget.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- $load a2, $data_offset(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
diff --git a/runtime/interpreter/mterp/mips/op_aget_boolean.S b/runtime/interpreter/mterp/mips/op_aget_boolean.S
deleted file mode 100644
index 59f7f82..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_byte.S b/runtime/interpreter/mterp/mips/op_aget_byte.S
deleted file mode 100644
index 11038fa..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_char.S b/runtime/interpreter/mterp/mips/op_aget_char.S
deleted file mode 100644
index 96f2ab6..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_object.S b/runtime/interpreter/mterp/mips/op_aget_object.S
deleted file mode 100644
index 9c49dfe..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_object.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_aget_short.S b/runtime/interpreter/mterp/mips/op_aget_short.S
deleted file mode 100644
index cd7f7bf..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aget_wide.S b/runtime/interpreter/mterp/mips/op_aget_wide.S
deleted file mode 100644
index 08822f5..0000000
--- a/runtime/interpreter/mterp/mips/op_aget_wide.S
+++ /dev/null
@@ -1,22 +0,0 @@
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
diff --git a/runtime/interpreter/mterp/mips/op_and_int.S b/runtime/interpreter/mterp/mips/op_and_int.S
deleted file mode 100644
index 98fe4af..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_2addr.S b/runtime/interpreter/mterp/mips/op_and_int_2addr.S
deleted file mode 100644
index 7f90ed4..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit16.S b/runtime/interpreter/mterp/mips/op_and_int_lit16.S
deleted file mode 100644
index e46f23b..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_int_lit8.S b/runtime/interpreter/mterp/mips/op_and_int_lit8.S
deleted file mode 100644
index 3332883..0000000
--- a/runtime/interpreter/mterp/mips/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long.S b/runtime/interpreter/mterp/mips/op_and_long.S
deleted file mode 100644
index a98a6df..0000000
--- a/runtime/interpreter/mterp/mips/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_and_long_2addr.S b/runtime/interpreter/mterp/mips/op_and_long_2addr.S
deleted file mode 100644
index 350c044..0000000
--- a/runtime/interpreter/mterp/mips/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_aput.S b/runtime/interpreter/mterp/mips/op_aput.S
deleted file mode 100644
index 46dcaee..0000000
--- a/runtime/interpreter/mterp/mips/op_aput.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- $store a2, $data_offset(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_boolean.S b/runtime/interpreter/mterp/mips/op_aput_boolean.S
deleted file mode 100644
index 9cae5ef..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_byte.S b/runtime/interpreter/mterp/mips/op_aput_byte.S
deleted file mode 100644
index 3bbd12c..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_char.S b/runtime/interpreter/mterp/mips/op_aput_char.S
deleted file mode 100644
index ae69717..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_object.S b/runtime/interpreter/mterp/mips/op_aput_object.S
deleted file mode 100644
index 55b13b1..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- *
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpAputObject)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_aput_short.S b/runtime/interpreter/mterp/mips/op_aput_short.S
deleted file mode 100644
index 9586259..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips/op_aput_wide.S b/runtime/interpreter/mterp/mips/op_aput_wide.S
deleted file mode 100644
index c3cff56..0000000
--- a/runtime/interpreter/mterp/mips/op_aput_wide.S
+++ /dev/null
@@ -1,24 +0,0 @@
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t0) # t0 <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
- # compare unsigned index, length
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # a2/a3 <- vBB[vCC]
- JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_array_length.S b/runtime/interpreter/mterp/mips/op_array_length.S
deleted file mode 100644
index ae2fe68..0000000
--- a/runtime/interpreter/mterp/mips/op_array_length.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Return the length of an array.
- */
- /* array-length vA, vB */
- GET_OPB(a1) # a1 <- B
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a0, a1) # a0 <- vB (object ref)
- # is object null?
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a3, a2, t0) # vA <- length
diff --git a/runtime/interpreter/mterp/mips/op_check_cast.S b/runtime/interpreter/mterp/mips/op_check_cast.S
deleted file mode 100644
index 3875ce6..0000000
--- a/runtime/interpreter/mterp/mips/op_check_cast.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
- PREFETCH_INST(2)
- bnez v0, MterpPossibleException
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_cmp_long.S b/runtime/interpreter/mterp/mips/op_cmp_long.S
deleted file mode 100644
index 44806c3..0000000
--- a/runtime/interpreter/mterp/mips/op_cmp_long.S
+++ /dev/null
@@ -1,34 +0,0 @@
- /*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * I think I can improve on the ARM code by the following observation
- * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
- * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
- * subu v0, t0, t1 # v0= -1:1:0 for [ < > = ]
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(a3, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- slt t0, a1, a3 # compare hi
- sgt t1, a1, a3
- subu v0, t1, t0 # v0 <- (-1, 1, 0)
- bnez v0, .L${opcode}_finish
- # at this point x.hi==y.hi
- sltu t0, a0, a2 # compare lo
- sgtu t1, a0, a2
- subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
-
-.L${opcode}_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_double.S b/runtime/interpreter/mterp/mips/op_cmpg_double.S
deleted file mode 100644
index b2e7532..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_cmpl_double.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpg_float.S b/runtime/interpreter/mterp/mips/op_cmpg_float.S
deleted file mode 100644
index 76550b5..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_cmpl_float.S" { "gt_bias":"1" }
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_double.S b/runtime/interpreter/mterp/mips/op_cmpl_double.S
deleted file mode 100644
index 369e5b3..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpl_double.S
+++ /dev/null
@@ -1,52 +0,0 @@
-%default { "gt_bias":"0" }
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # rOBJ <- BB
- srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
- EAS2(t0, rFP, t0) # t0 <- &fp[CC]
- LOAD64_F(ft0, ft0f, rOBJ)
- LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
- cmp.eq.d ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.d fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- c.olt.d fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.d fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
diff --git a/runtime/interpreter/mterp/mips/op_cmpl_float.S b/runtime/interpreter/mterp/mips/op_cmpl_float.S
deleted file mode 100644
index 1dd5506..0000000
--- a/runtime/interpreter/mterp/mips/op_cmpl_float.S
+++ /dev/null
@@ -1,50 +0,0 @@
-%default { "gt_bias":"0" }
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * for: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8
- GET_VREG_F(ft0, a2)
- GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
- cmp.eq.s ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- cmp.lt.s ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.s fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- c.olt.s fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.s fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
diff --git a/runtime/interpreter/mterp/mips/op_const.S b/runtime/interpreter/mterp/mips/op_const.S
deleted file mode 100644
index bd9f873..0000000
--- a/runtime/interpreter/mterp/mips/op_const.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const vAA, +BBBBbbbb */
- GET_OPA(a3) # a3 <- AA
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_16.S b/runtime/interpreter/mterp/mips/op_const_16.S
deleted file mode 100644
index 2ffb30f..0000000
--- a/runtime/interpreter/mterp/mips/op_const_16.S
+++ /dev/null
@@ -1,6 +0,0 @@
- /* const/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_4.S b/runtime/interpreter/mterp/mips/op_const_4.S
deleted file mode 100644
index 6866c78..0000000
--- a/runtime/interpreter/mterp/mips/op_const_4.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const/4 vA, +B */
- sll a1, rINST, 16 # a1 <- Bxxx0000
- GET_OPA(a0) # a0 <- A+
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
- and a0, a0, 15
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
diff --git a/runtime/interpreter/mterp/mips/op_const_class.S b/runtime/interpreter/mterp/mips/op_const_class.S
deleted file mode 100644
index 5b3c968..0000000
--- a/runtime/interpreter/mterp/mips/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips/op_const_high16.S b/runtime/interpreter/mterp/mips/op_const_high16.S
deleted file mode 100644
index 5162402..0000000
--- a/runtime/interpreter/mterp/mips/op_const_high16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/high16 vAA, +BBBB0000 */
- FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- sll a0, a0, 16 # a0 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
diff --git a/runtime/interpreter/mterp/mips/op_const_method_handle.S b/runtime/interpreter/mterp/mips/op_const_method_handle.S
deleted file mode 100644
index 4011e43..0000000
--- a/runtime/interpreter/mterp/mips/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips/op_const_method_type.S b/runtime/interpreter/mterp/mips/op_const_method_type.S
deleted file mode 100644
index 18a5e0f..0000000
--- a/runtime/interpreter/mterp/mips/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips/op_const_string.S b/runtime/interpreter/mterp/mips/op_const_string.S
deleted file mode 100644
index 0bab6b4..0000000
--- a/runtime/interpreter/mterp/mips/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
deleted file mode 100644
index 54cec97..0000000
--- a/runtime/interpreter/mterp/mips/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* const/string vAA, string@BBBBBBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a2, 2) # a2 <- BBBB (high)
- GET_OPA(a1) # a1 <- AA
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(3) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(3) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_const_wide.S b/runtime/interpreter/mterp/mips/op_const_wide.S
deleted file mode 100644
index f8911e3..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* const-wide vAA, +HHHHhhhhBBBBbbbb */
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (low middle)
- FETCH(a2, 3) # a2 <- hhhh (high middle)
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
- FETCH(a3, 4) # a3 <- HHHH (high)
- GET_OPA(t1) # t1 <- AA
- INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
- FETCH_ADVANCE_INST(5) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_16.S b/runtime/interpreter/mterp/mips/op_const_wide_16.S
deleted file mode 100644
index 2ca5ab9..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const-wide/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- sra a1, a0, 31 # a1 <- ssssssss
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_32.S b/runtime/interpreter/mterp/mips/op_const_wide_32.S
deleted file mode 100644
index bf802ca..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const-wide/32 vAA, +BBBBbbbb */
- FETCH(a0, 1) # a0 <- 0000bbbb (low)
- GET_OPA(a3) # a3 <- AA
- FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- sra a1, a0, 31 # a1 <- ssssssss
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_const_wide_high16.S b/runtime/interpreter/mterp/mips/op_const_wide_high16.S
deleted file mode 100644
index 04b90fa..0000000
--- a/runtime/interpreter/mterp/mips/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const-wide/high16 vAA, +BBBB000000000000 */
- FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- li a0, 0 # a0 <- 00000000
- sll a1, 16 # a1 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_div_double.S b/runtime/interpreter/mterp/mips/op_div_double.S
deleted file mode 100644
index 84e4c4e..0000000
--- a/runtime/interpreter/mterp/mips/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_double_2addr.S b/runtime/interpreter/mterp/mips/op_div_double_2addr.S
deleted file mode 100644
index 65b92e3..0000000
--- a/runtime/interpreter/mterp/mips/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"div.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float.S b/runtime/interpreter/mterp/mips/op_div_float.S
deleted file mode 100644
index 44b8d47..0000000
--- a/runtime/interpreter/mterp/mips/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_float_2addr.S b/runtime/interpreter/mterp/mips/op_div_float_2addr.S
deleted file mode 100644
index e5fff92..0000000
--- a/runtime/interpreter/mterp/mips/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"div.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_int.S b/runtime/interpreter/mterp/mips/op_div_int.S
deleted file mode 100644
index 5d28c84..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_2addr.S b/runtime/interpreter/mterp/mips/op_div_int_2addr.S
deleted file mode 100644
index 6c079e0..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_2addr.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit16.S b/runtime/interpreter/mterp/mips/op_div_int_lit16.S
deleted file mode 100644
index ee7452c..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_lit16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_int_lit8.S b/runtime/interpreter/mterp/mips/op_div_int_lit8.S
deleted file mode 100644
index d2964b8..0000000
--- a/runtime/interpreter/mterp/mips/op_div_int_lit8.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mflo a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_div_long.S b/runtime/interpreter/mterp/mips/op_div_long.S
deleted file mode 100644
index 2097866..0000000
--- a/runtime/interpreter/mterp/mips/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_div_long_2addr.S b/runtime/interpreter/mterp/mips/op_div_long_2addr.S
deleted file mode 100644
index c279305..0000000
--- a/runtime/interpreter/mterp/mips/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_float.S b/runtime/interpreter/mterp/mips/op_double_to_float.S
deleted file mode 100644
index 1d32c2e..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopNarrower.S" {"instr":"cvt.s.d fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_double_to_int.S b/runtime/interpreter/mterp/mips/op_double_to_int.S
deleted file mode 100644
index 6d7c6ca..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_int.S
+++ /dev/null
@@ -1,31 +0,0 @@
- /*
- * double-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.d fcc0, fa0, fa0
- mtc1 zero, fa0
- MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
- movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
- trunc.w.d fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_double_to_long.S b/runtime/interpreter/mterp/mips/op_double_to_long.S
deleted file mode 100644
index 459ab7e..0000000
--- a/runtime/interpreter/mterp/mips/op_double_to_long.S
+++ /dev/null
@@ -1,50 +0,0 @@
- /*
- * double-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.d fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.d fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .L${opcode}_get_opcode
-
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .L${opcode}_get_opcode
-
- neg.d fa1, fa1
- c.ole.d fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .L${opcode}_get_opcode
-
- JAL(__fixdfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .L${opcode}_set_vreg
-#endif
-%break
-
-#ifndef MIPS32REVGE6
-.L${opcode}_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.L${opcode}_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_fill_array_data.S b/runtime/interpreter/mterp/mips/op_fill_array_data.S
deleted file mode 100644
index c3cd371..0000000
--- a/runtime/interpreter/mterp/mips/op_fill_array_data.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC()
- FETCH(a1, 1) # a1 <- bbbb (lo)
- FETCH(a0, 2) # a0 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
- GET_VREG(a0, a3) # a0 <- vAA (array object)
- EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
- JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array.S b/runtime/interpreter/mterp/mips/op_filled_new_array.S
deleted file mode 100644
index 9511578..0000000
--- a/runtime/interpreter/mterp/mips/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
- move a1, rPC
- move a2, rSELF
- JAL($helper) # v0 <- helper(shadow_frame, pc, self)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
deleted file mode 100644
index f8dcb0e..0000000
--- a/runtime/interpreter/mterp/mips/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips/op_float_to_double.S b/runtime/interpreter/mterp/mips/op_float_to_double.S
deleted file mode 100644
index 1315255..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funopWider.S" {"instr":"cvt.d.s fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_float_to_int.S b/runtime/interpreter/mterp/mips/op_float_to_int.S
deleted file mode 100644
index 26a0988..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_int.S
+++ /dev/null
@@ -1,29 +0,0 @@
- /*
- * float-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.s fcc0, fa0, fa0
- mtc1 zero, fa0
- movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
- trunc.w.s fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
diff --git a/runtime/interpreter/mterp/mips/op_float_to_long.S b/runtime/interpreter/mterp/mips/op_float_to_long.S
deleted file mode 100644
index b8f8efb..0000000
--- a/runtime/interpreter/mterp/mips/op_float_to_long.S
+++ /dev/null
@@ -1,48 +0,0 @@
- /*
- * float-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.s fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.s fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .L${opcode}_get_opcode
-
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .L${opcode}_get_opcode
-
- neg.s fa1, fa1
- c.ole.s fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .L${opcode}_get_opcode
-
- JAL(__fixsfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .L${opcode}_set_vreg
-#endif
-%break
-
-#ifndef MIPS32REVGE6
-.L${opcode}_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.L${opcode}_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_goto.S b/runtime/interpreter/mterp/mips/op_goto.S
deleted file mode 100644
index 57182a5..0000000
--- a/runtime/interpreter/mterp/mips/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sll a0, rINST, 16 # a0 <- AAxx0000
- sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_goto_16.S b/runtime/interpreter/mterp/mips/op_goto_16.S
deleted file mode 100644
index 06c96cd..0000000
--- a/runtime/interpreter/mterp/mips/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_goto_32.S b/runtime/interpreter/mterp/mips/op_goto_32.S
deleted file mode 100644
index ef5bf6b..0000000
--- a/runtime/interpreter/mterp/mips/op_goto_32.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- FETCH(rINST, 1) # rINST <- aaaa (lo)
- FETCH(a1, 2) # a1 <- AAAA (hi)
- INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_if_eq.S b/runtime/interpreter/mterp/mips/op_if_eq.S
deleted file mode 100644
index d6f9987..0000000
--- a/runtime/interpreter/mterp/mips/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_eqz.S b/runtime/interpreter/mterp/mips/op_if_eqz.S
deleted file mode 100644
index c52b76a..0000000
--- a/runtime/interpreter/mterp/mips/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ge.S b/runtime/interpreter/mterp/mips/op_if_ge.S
deleted file mode 100644
index bd06ff5..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gez.S b/runtime/interpreter/mterp/mips/op_if_gez.S
deleted file mode 100644
index 549231a..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gt.S b/runtime/interpreter/mterp/mips/op_if_gt.S
deleted file mode 100644
index 0be3091..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_gtz.S b/runtime/interpreter/mterp/mips/op_if_gtz.S
deleted file mode 100644
index 5c7bcc4..0000000
--- a/runtime/interpreter/mterp/mips/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_le.S b/runtime/interpreter/mterp/mips/op_if_le.S
deleted file mode 100644
index c35c1a2..0000000
--- a/runtime/interpreter/mterp/mips/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lez.S b/runtime/interpreter/mterp/mips/op_if_lez.S
deleted file mode 100644
index 3dc6543..0000000
--- a/runtime/interpreter/mterp/mips/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips/op_if_lt.S b/runtime/interpreter/mterp/mips/op_if_lt.S
deleted file mode 100644
index 3f3386c..0000000
--- a/runtime/interpreter/mterp/mips/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ltz.S b/runtime/interpreter/mterp/mips/op_if_ltz.S
deleted file mode 100644
index e6d6ed6..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips/op_if_ne.S b/runtime/interpreter/mterp/mips/op_if_ne.S
deleted file mode 100644
index 3d7bf35..0000000
--- a/runtime/interpreter/mterp/mips/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_if_nez.S b/runtime/interpreter/mterp/mips/op_if_nez.S
deleted file mode 100644
index d121eae..0000000
--- a/runtime/interpreter/mterp/mips/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips/op_iget.S b/runtime/interpreter/mterp/mips/op_iget.S
deleted file mode 100644
index 33717de..0000000
--- a/runtime/interpreter/mterp/mips/op_iget.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL($helper)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean.S b/runtime/interpreter/mterp/mips/op_iget_boolean.S
deleted file mode 100644
index f2ef68d..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
deleted file mode 100644
index f3032b3..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte.S b/runtime/interpreter/mterp/mips/op_iget_byte.S
deleted file mode 100644
index 0c8fb7c..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
deleted file mode 100644
index d93f844..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char.S b/runtime/interpreter/mterp/mips/op_iget_char.S
deleted file mode 100644
index 69d04c4..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_char_quick.S b/runtime/interpreter/mterp/mips/op_iget_char_quick.S
deleted file mode 100644
index 6f6d608..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object.S b/runtime/interpreter/mterp/mips/op_iget_object.S
deleted file mode 100644
index bea330a..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_object_quick.S b/runtime/interpreter/mterp/mips/op_iget_object_quick.S
deleted file mode 100644
index 95c34d7..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_object_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- FETCH(a1, 1) # a1 <- field byte offset
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- object we're operating on
- JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
diff --git a/runtime/interpreter/mterp/mips/op_iget_quick.S b/runtime/interpreter/mterp/mips/op_iget_quick.S
deleted file mode 100644
index 46277d3..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"lw" }
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- $load a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
diff --git a/runtime/interpreter/mterp/mips/op_iget_short.S b/runtime/interpreter/mterp/mips/op_iget_short.S
deleted file mode 100644
index 357c791..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_short_quick.S b/runtime/interpreter/mterp/mips/op_iget_short_quick.S
deleted file mode 100644
index 899a0fe..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide.S b/runtime/interpreter/mterp/mips/op_iget_wide.S
deleted file mode 100644
index 858a889..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_wide.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field byte offset
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetU64)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a2, t0) # fp[A] <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
deleted file mode 100644
index 128be57..0000000
--- a/runtime/interpreter/mterp/mips/op_iget_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* iget-wide-quick vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1 # t0 <- a3 + a1
- LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_instance_of.S b/runtime/interpreter/mterp/mips/op_instance_of.S
deleted file mode 100644
index 706dcf3..0000000
--- a/runtime/interpreter/mterp/mips/op_instance_of.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- CCCC
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- GET_OPA4(rOBJ) # rOBJ <- A+
- JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
diff --git a/runtime/interpreter/mterp/mips/op_int_to_byte.S b/runtime/interpreter/mterp/mips/op_int_to_byte.S
deleted file mode 100644
index 9266aab..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"SEB(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_char.S b/runtime/interpreter/mterp/mips/op_int_to_char.S
deleted file mode 100644
index 1b74a6e..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_double.S b/runtime/interpreter/mterp/mips/op_int_to_double.S
deleted file mode 100644
index 89484ce..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funopWider.S" {"instr":"cvt.d.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_float.S b/runtime/interpreter/mterp/mips/op_int_to_float.S
deleted file mode 100644
index d6f4b36..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/funop.S" {"instr":"cvt.s.w fv0, fa0"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_long.S b/runtime/interpreter/mterp/mips/op_int_to_long.S
deleted file mode 100644
index 9907463..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/runtime/interpreter/mterp/mips/op_int_to_short.S b/runtime/interpreter/mterp/mips/op_int_to_short.S
deleted file mode 100644
index 8749cd8..0000000
--- a/runtime/interpreter/mterp/mips/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"SEH(a0, a0)"}
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom.S b/runtime/interpreter/mterp/mips/op_invoke_custom.S
deleted file mode 100644
index f9241c4..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
deleted file mode 100644
index 862a614..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct.S b/runtime/interpreter/mterp/mips/op_invoke_direct.S
deleted file mode 100644
index 1ef198a..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
deleted file mode 100644
index af7477f..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface.S b/runtime/interpreter/mterp/mips/op_invoke_interface.S
deleted file mode 100644
index 80a485a..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_interface.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeInterface" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
deleted file mode 100644
index 8d725dc..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
deleted file mode 100644
index 85e01e7..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
deleted file mode 100644
index ce63978..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static.S b/runtime/interpreter/mterp/mips/op_invoke_static.S
deleted file mode 100644
index 46253cb..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_static.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_static_range.S b/runtime/interpreter/mterp/mips/op_invoke_static_range.S
deleted file mode 100644
index 96abafe..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super.S b/runtime/interpreter/mterp/mips/op_invoke_super.S
deleted file mode 100644
index 473951b..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_super.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeSuper" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_super_range.S b/runtime/interpreter/mterp/mips/op_invoke_super_range.S
deleted file mode 100644
index 963ff27..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual.S b/runtime/interpreter/mterp/mips/op_invoke_virtual.S
deleted file mode 100644
index ea51e98..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtual" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
deleted file mode 100644
index 0c00091..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
deleted file mode 100644
index 82201e7..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
deleted file mode 100644
index c783675..0000000
--- a/runtime/interpreter/mterp/mips/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips/op_iput.S b/runtime/interpreter/mterp/mips/op_iput.S
deleted file mode 100644
index 4dd4075..0000000
--- a/runtime/interpreter/mterp/mips/op_iput.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "helper":"MterpIPutU32" }
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern $helper
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL($helper)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean.S b/runtime/interpreter/mterp/mips/op_iput_boolean.S
deleted file mode 100644
index 55ac4ce..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
deleted file mode 100644
index 7d5caf6..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte.S b/runtime/interpreter/mterp/mips/op_iput_byte.S
deleted file mode 100644
index 61e489b..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
deleted file mode 100644
index 7d5caf6..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char.S b/runtime/interpreter/mterp/mips/op_iput_char.S
deleted file mode 100644
index 2caad1e..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_char_quick.S b/runtime/interpreter/mterp/mips/op_iput_char_quick.S
deleted file mode 100644
index 4bc84eb..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_object.S b/runtime/interpreter/mterp/mips/op_iput_object.S
deleted file mode 100644
index c96a4d4..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_object.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * 32-bit instance field put.
- *
- * for: iput-object, iput-object-volatile
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpIPutObj)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_object_quick.S b/runtime/interpreter/mterp/mips/op_iput_object_quick.S
deleted file mode 100644
index 82044f5..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_object_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* For: iput-object-quick */
- /* op vA, vB, offset@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpIputObjectQuick)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_quick.S b/runtime/interpreter/mterp/mips/op_iput_quick.S
deleted file mode 100644
index d9753b1..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default { "store":"sw" }
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- $store a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_short.S b/runtime/interpreter/mterp/mips/op_iput_short.S
deleted file mode 100644
index 414a15b..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_short_quick.S b/runtime/interpreter/mterp/mips/op_iput_short_quick.S
deleted file mode 100644
index 4bc84eb..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide.S b/runtime/interpreter/mterp/mips/op_iput_wide.S
deleted file mode 100644
index dccb6b7..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- EAS2(a2, rFP, a2) # a2 <- &fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutU64)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
deleted file mode 100644
index 0eb228d..0000000
--- a/runtime/interpreter/mterp/mips/op_iput_wide_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* iput-wide-quick vA, vB, offset@CCCC */
- GET_OPA4(a0) # a0 <- A(+)
- GET_OPB(a1) # a1 <- B
- GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
- # check object for null
- beqz a2, common_errNullObject # object was null
- EAS2(a3, rFP, a0) # a3 <- &fp[A]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
- FETCH(a3, 1) # a3 <- field byte offset
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu a2, a2, a3 # obj.field (64 bits, aligned) <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
- JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_long_to_double.S b/runtime/interpreter/mterp/mips/op_long_to_double.S
deleted file mode 100644
index 153f582..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_double.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /*
- * long-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
diff --git a/runtime/interpreter/mterp/mips/op_long_to_float.S b/runtime/interpreter/mterp/mips/op_long_to_float.S
deleted file mode 100644
index dd1ab81..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_float.S
+++ /dev/null
@@ -1,20 +0,0 @@
- /*
- * long-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdisf)
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/op_long_to_int.S b/runtime/interpreter/mterp/mips/op_long_to_int.S
deleted file mode 100644
index 949c180..0000000
--- a/runtime/interpreter/mterp/mips/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "mips/op_move.S"
diff --git a/runtime/interpreter/mterp/mips/op_monitor_enter.S b/runtime/interpreter/mterp/mips/op_monitor_enter.S
deleted file mode 100644
index 20d9029..0000000
--- a/runtime/interpreter/mterp/mips/op_monitor_enter.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_monitor_exit.S b/runtime/interpreter/mterp/mips/op_monitor_exit.S
deleted file mode 100644
index 1eadff9..0000000
--- a/runtime/interpreter/mterp/mips/op_monitor_exit.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move.S b/runtime/interpreter/mterp/mips/op_move.S
deleted file mode 100644
index 547ea3a..0000000
--- a/runtime/interpreter/mterp/mips/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if $is_object
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_16.S b/runtime/interpreter/mterp/mips/op_move_16.S
deleted file mode 100644
index 91b7399..0000000
--- a/runtime/interpreter/mterp/mips/op_move_16.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH(a1, 2) # a1 <- BBBB
- FETCH(a0, 1) # a0 <- AAAA
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_exception.S b/runtime/interpreter/mterp/mips/op_move_exception.S
deleted file mode 100644
index f1bece7..0000000
--- a/runtime/interpreter/mterp/mips/op_move_exception.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-exception vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
- sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- JR(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_move_from16.S b/runtime/interpreter/mterp/mips/op_move_from16.S
deleted file mode 100644
index 90c25c9..0000000
--- a/runtime/interpreter/mterp/mips/op_move_from16.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH(a1, 1) # a1 <- BBBB
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
- .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_object.S b/runtime/interpreter/mterp/mips/op_move_object.S
deleted file mode 100644
index 9420ff3..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_16.S b/runtime/interpreter/mterp/mips/op_move_object_16.S
deleted file mode 100644
index d6454c2..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_object_from16.S b/runtime/interpreter/mterp/mips/op_move_object_from16.S
deleted file mode 100644
index db0aca1..0000000
--- a/runtime/interpreter/mterp/mips/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result.S b/runtime/interpreter/mterp/mips/op_move_result.S
deleted file mode 100644
index a4d5bfe..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- GET_OPA(a2) # a2 <- AA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
- .else
- SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
- .endif
diff --git a/runtime/interpreter/mterp/mips/op_move_result_object.S b/runtime/interpreter/mterp/mips/op_move_result_object.S
deleted file mode 100644
index fcbffee..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_move_result_wide.S b/runtime/interpreter/mterp/mips/op_move_result_wide.S
deleted file mode 100644
index 1259218..0000000
--- a/runtime/interpreter/mterp/mips/op_move_result_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* move-result-wide vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- LOAD64(a0, a1, a3) # a0/a1 <- retval.j
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide.S b/runtime/interpreter/mterp/mips/op_move_wide.S
deleted file mode 100644
index 01d0949..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- GET_OPA4(a2) # a2 <- A(+)
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_16.S b/runtime/interpreter/mterp/mips/op_move_wide_16.S
deleted file mode 100644
index 587ba04..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 2) # a3 <- BBBB
- FETCH(a2, 1) # a2 <- AAAA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_move_wide_from16.S b/runtime/interpreter/mterp/mips/op_move_wide_from16.S
deleted file mode 100644
index 5003fbd..0000000
--- a/runtime/interpreter/mterp/mips/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 1) # a3 <- BBBB
- GET_OPA(a2) # a2 <- AA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/op_mul_double.S b/runtime/interpreter/mterp/mips/op_mul_double.S
deleted file mode 100644
index 44a473b..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
deleted file mode 100644
index 4e5c230..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"mul.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float.S b/runtime/interpreter/mterp/mips/op_mul_float.S
deleted file mode 100644
index abc9390..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
deleted file mode 100644
index 2469109..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"mul.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int.S b/runtime/interpreter/mterp/mips/op_mul_int.S
deleted file mode 100644
index 266823c..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
deleted file mode 100644
index b7dc5d3..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
deleted file mode 100644
index fb4c8ec..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
deleted file mode 100644
index 6d2e7de..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_mul_long.S b/runtime/interpreter/mterp/mips/op_mul_long.S
deleted file mode 100644
index 74b049a..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_long.S
+++ /dev/null
@@ -1,42 +0,0 @@
- /*
- * Signed 64-bit integer multiply.
- * a1 a0
- * x a3 a2
- * -------------
- * a2a1 a2a0
- * a3a0
- * a3a1 (<= unused)
- * ---------------
- * v1 v0
- */
- /* mul-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- and t0, a0, 255 # a2 <- BB
- srl t1, a0, 8 # a3 <- CC
- EAS2(t0, rFP, t0) # t0 <- &fp[BB]
- LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
-
- EAS2(t1, rFP, t1) # t0 <- &fp[CC]
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
-#endif
- mul t0, a2, a1 # t0= a2a1
- addu v1, v1, t1 # v1+= hi(a2a0)
- addu v1, v1, t0 # v1= a3a0 + a2a1;
-
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- b .L${opcode}_finish
-%break
-
-.L${opcode}_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
deleted file mode 100644
index 683b055..0000000
--- a/runtime/interpreter/mterp/mips/op_mul_long_2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
- /*
- * See op_mul_long.S for more details
- */
- /* mul-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
-
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # vAA.low / high
-
- GET_OPB(t1) # t1 <- B
- EAS2(t1, rFP, t1) # t1 <- &fp[B]
- LOAD64(a2, a3, t1) # vBB.low / high
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
- #endif
- mul t2, a2, a1 # t2= a2a1
- addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
- addu v1, v1, t2 # v1= v1 + a2a1;
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
diff --git a/runtime/interpreter/mterp/mips/op_neg_double.S b/runtime/interpreter/mterp/mips/op_neg_double.S
deleted file mode 100644
index 89cc918..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_float.S b/runtime/interpreter/mterp/mips/op_neg_float.S
deleted file mode 100644
index e702755..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_int.S b/runtime/interpreter/mterp/mips/op_neg_int.S
deleted file mode 100644
index 4461731..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_neg_long.S b/runtime/interpreter/mterp/mips/op_neg_long.S
deleted file mode 100644
index 71e60f5..0000000
--- a/runtime/interpreter/mterp/mips/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_new_array.S b/runtime/interpreter/mterp/mips/op_new_array.S
deleted file mode 100644
index 4a6512d..0000000
--- a/runtime/interpreter/mterp/mips/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpNewArray)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_new_instance.S b/runtime/interpreter/mterp/mips/op_new_instance.S
deleted file mode 100644
index 3c9e83f..0000000
--- a/runtime/interpreter/mterp/mips/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- JAL(MterpNewInstance)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_nop.S b/runtime/interpreter/mterp/mips/op_nop.S
deleted file mode 100644
index 3565631..0000000
--- a/runtime/interpreter/mterp/mips/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_not_int.S b/runtime/interpreter/mterp/mips/op_not_int.S
deleted file mode 100644
index 55d8cc1..0000000
--- a/runtime/interpreter/mterp/mips/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_not_long.S b/runtime/interpreter/mterp/mips/op_not_long.S
deleted file mode 100644
index 9e7c95b..0000000
--- a/runtime/interpreter/mterp/mips/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int.S b/runtime/interpreter/mterp/mips/op_or_int.S
deleted file mode 100644
index c7ce760..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_2addr.S b/runtime/interpreter/mterp/mips/op_or_int_2addr.S
deleted file mode 100644
index 192d611..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit16.S b/runtime/interpreter/mterp/mips/op_or_int_lit16.S
deleted file mode 100644
index f4ef75f..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_int_lit8.S b/runtime/interpreter/mterp/mips/op_or_int_lit8.S
deleted file mode 100644
index f6212e2..0000000
--- a/runtime/interpreter/mterp/mips/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long.S b/runtime/interpreter/mterp/mips/op_or_long.S
deleted file mode 100644
index 0f94486..0000000
--- a/runtime/interpreter/mterp/mips/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_or_long_2addr.S b/runtime/interpreter/mterp/mips/op_or_long_2addr.S
deleted file mode 100644
index 43c3d05..0000000
--- a/runtime/interpreter/mterp/mips/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_packed_switch.S b/runtime/interpreter/mterp/mips/op_packed_switch.S
deleted file mode 100644
index 0a1ff98..0000000
--- a/runtime/interpreter/mterp/mips/op_packed_switch.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_VREG(a1, a3) # a1 <- vAA
- EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
- JAL($func) # a0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips/op_rem_double.S b/runtime/interpreter/mterp/mips/op_rem_double.S
deleted file mode 100644
index a6890a8..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
deleted file mode 100644
index a24e160..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"JAL(fmod)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float.S b/runtime/interpreter/mterp/mips/op_rem_float.S
deleted file mode 100644
index ac3d50c..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
deleted file mode 100644
index 7f0a932..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"JAL(fmodf)"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_int.S b/runtime/interpreter/mterp/mips/op_rem_int.S
deleted file mode 100644
index c2a334a..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
deleted file mode 100644
index 46c353f..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_2addr.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binop2addr.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
deleted file mode 100644
index 2894ad3..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_lit16.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit16.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
deleted file mode 100644
index 582248b..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_int_lit8.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef MIPS32REVGE6
-%include "mips/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
-#else
-%include "mips/binopLit8.S" {"preinstr":"div zero, a0, a1", "instr":"mfhi a0", "chkzero":"1"}
-#endif
diff --git a/runtime/interpreter/mterp/mips/op_rem_long.S b/runtime/interpreter/mterp/mips/op_rem_long.S
deleted file mode 100644
index e3eb19b..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
deleted file mode 100644
index 8fc9fdb..0000000
--- a/runtime/interpreter/mterp/mips/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips/op_return.S b/runtime/interpreter/mterp/mips/op_return.S
deleted file mode 100644
index 44b9395..0000000
--- a/runtime/interpreter/mterp/mips/op_return.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- GET_VREG(v0, a2) # v0 <- vAA
- move v1, zero
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_object.S b/runtime/interpreter/mterp/mips/op_return_object.S
deleted file mode 100644
index 7350e00..0000000
--- a/runtime/interpreter/mterp/mips/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_return.S"
diff --git a/runtime/interpreter/mterp/mips/op_return_void.S b/runtime/interpreter/mterp/mips/op_return_void.S
deleted file mode 100644
index 1f616ea..0000000
--- a/runtime/interpreter/mterp/mips/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
deleted file mode 100644
index e670c28..0000000
--- a/runtime/interpreter/mterp/mips/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_return_wide.S b/runtime/interpreter/mterp/mips/op_return_wide.S
deleted file mode 100644
index f0f679d..0000000
--- a/runtime/interpreter/mterp/mips/op_return_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
- LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int.S b/runtime/interpreter/mterp/mips/op_rsub_int.S
deleted file mode 100644
index f7e61bb..0000000
--- a/runtime/interpreter/mterp/mips/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
deleted file mode 100644
index 3968a5e..0000000
--- a/runtime/interpreter/mterp/mips/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips/op_sget.S b/runtime/interpreter/mterp/mips/op_sget.S
deleted file mode 100644
index 8750a17..0000000
--- a/runtime/interpreter/mterp/mips/op_sget.S
+++ /dev/null
@@ -1,24 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern $helper
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL($helper)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if $is_object
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
diff --git a/runtime/interpreter/mterp/mips/op_sget_boolean.S b/runtime/interpreter/mterp/mips/op_sget_boolean.S
deleted file mode 100644
index 7a7012e..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_byte.S b/runtime/interpreter/mterp/mips/op_sget_byte.S
deleted file mode 100644
index a2f1dbf..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_char.S b/runtime/interpreter/mterp/mips/op_sget_char.S
deleted file mode 100644
index 07d4041..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_object.S b/runtime/interpreter/mterp/mips/op_sget_object.S
deleted file mode 100644
index 0a3c9ee..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_short.S b/runtime/interpreter/mterp/mips/op_sget_short.S
deleted file mode 100644
index 2960443..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sget_wide.S b/runtime/interpreter/mterp/mips/op_sget_wide.S
deleted file mode 100644
index 76f78cb..0000000
--- a/runtime/interpreter/mterp/mips/op_sget_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * 64-bit SGET handler.
- */
- /* sget-wide vAA, field@BBBB */
- .extern MterpSGetU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetU64)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- bnez a3, MterpException
- GET_OPA(a1) # a1 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a1, t0) # vAA/vAA+1 <- v0/v1
diff --git a/runtime/interpreter/mterp/mips/op_shl_int.S b/runtime/interpreter/mterp/mips/op_shl_int.S
deleted file mode 100644
index 15cbe94..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
deleted file mode 100644
index ef9bd65..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
deleted file mode 100644
index d2afb53..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shl_long.S b/runtime/interpreter/mterp/mips/op_shl_long.S
deleted file mode 100644
index cc08112..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t2) # t2 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v1, a2, 0x20 # shift< shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .L${opcode}_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
deleted file mode 100644
index 93c5783..0000000
--- a/runtime/interpreter/mterp/mips/op_shl_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
- LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v1, a2, 0x20 # shift< shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .L${opcode}_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_int.S b/runtime/interpreter/mterp/mips/op_shr_int.S
deleted file mode 100644
index 6110839..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
deleted file mode 100644
index e00ff5b..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
deleted file mode 100644
index d058f58..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_shr_long.S b/runtime/interpreter/mterp/mips/op_shr_long.S
deleted file mode 100644
index ea032fe..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t3) # t3 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .L${opcode}_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vAA/VAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/VAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
deleted file mode 100644
index c805ea4..0000000
--- a/runtime/interpreter/mterp/mips/op_shr_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- GET_OPA4(t2) # t2 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t2) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .L${opcode}_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_sparse_switch.S b/runtime/interpreter/mterp/mips/op_sparse_switch.S
deleted file mode 100644
index 670f464..0000000
--- a/runtime/interpreter/mterp/mips/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips/op_sput.S b/runtime/interpreter/mterp/mips/op_sput.S
deleted file mode 100644
index 547de39..0000000
--- a/runtime/interpreter/mterp/mips/op_sput.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"MterpSPutU32"}
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL($helper)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_boolean.S b/runtime/interpreter/mterp/mips/op_sput_boolean.S
deleted file mode 100644
index 0137430..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_byte.S b/runtime/interpreter/mterp/mips/op_sput_byte.S
deleted file mode 100644
index 5ae4256..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_char.S b/runtime/interpreter/mterp/mips/op_sput_char.S
deleted file mode 100644
index 83787a7..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_object.S b/runtime/interpreter/mterp/mips/op_sput_object.S
deleted file mode 100644
index 55c88a6..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_object.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * General 32-bit SPUT handler.
- *
- * for: sput-object,
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpSPutObj)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sput_short.S b/runtime/interpreter/mterp/mips/op_sput_short.S
deleted file mode 100644
index df99b44..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips/op_sput_wide.S b/runtime/interpreter/mterp/mips/op_sput_wide.S
deleted file mode 100644
index cfaaaee..0000000
--- a/runtime/interpreter/mterp/mips/op_sput_wide.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * 64-bit SPUT handler.
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPA(a1) # a1 <- AA
- EAS2(a1, rFP, a1) # a1 <- &fp[AA]
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutU64)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/op_sub_double.S b/runtime/interpreter/mterp/mips/op_sub_double.S
deleted file mode 100644
index 9473218..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
deleted file mode 100644
index 7ce7c74..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinopWide2addr.S" {"instr":"sub.d fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float.S b/runtime/interpreter/mterp/mips/op_sub_float.S
deleted file mode 100644
index 04650d9..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
deleted file mode 100644
index dfe935c..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/fbinop2addr.S" {"instr":"sub.s fv0, fa0, fa1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int.S b/runtime/interpreter/mterp/mips/op_sub_int.S
deleted file mode 100644
index 43da1b6..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
deleted file mode 100644
index cf34aa6..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_sub_long.S b/runtime/interpreter/mterp/mips/op_sub_long.S
deleted file mode 100644
index 0f58e8e..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * For little endian the code sequence looks as follows:
- * subu v0,a0,a2
- * subu v1,a1,a3
- * sltu a0,a0,v0
- * subu v1,v1,a0
- */
-%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
deleted file mode 100644
index aa256c2..0000000
--- a/runtime/interpreter/mterp/mips/op_sub_long_2addr.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * See op_sub_long.S for more details
- */
-%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/runtime/interpreter/mterp/mips/op_throw.S b/runtime/interpreter/mterp/mips/op_throw.S
deleted file mode 100644
index adc8b04..0000000
--- a/runtime/interpreter/mterp/mips/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC() # exception handler can throw
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a1, a2) # a1 <- vAA (exception object)
- # null object?
- beqz a1, common_errNullObject # yes, throw an NPE instead
- sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/mips/op_unused_3e.S b/runtime/interpreter/mterp/mips/op_unused_3e.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_3f.S b/runtime/interpreter/mterp/mips/op_unused_3f.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_40.S b/runtime/interpreter/mterp/mips/op_unused_40.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_41.S b/runtime/interpreter/mterp/mips/op_unused_41.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_42.S b/runtime/interpreter/mterp/mips/op_unused_42.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_43.S b/runtime/interpreter/mterp/mips/op_unused_43.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_73.S b/runtime/interpreter/mterp/mips/op_unused_73.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_73.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_79.S b/runtime/interpreter/mterp/mips/op_unused_79.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_7a.S b/runtime/interpreter/mterp/mips/op_unused_7a.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f3.S b/runtime/interpreter/mterp/mips/op_unused_f3.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f4.S b/runtime/interpreter/mterp/mips/op_unused_f4.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f5.S b/runtime/interpreter/mterp/mips/op_unused_f5.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f6.S b/runtime/interpreter/mterp/mips/op_unused_f6.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f7.S b/runtime/interpreter/mterp/mips/op_unused_f7.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f8.S b/runtime/interpreter/mterp/mips/op_unused_f8.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_f9.S b/runtime/interpreter/mterp/mips/op_unused_f9.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fc.S b/runtime/interpreter/mterp/mips/op_unused_fc.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_unused_fd.S b/runtime/interpreter/mterp/mips/op_unused_fd.S
deleted file mode 100644
index 99ef3cf..0000000
--- a/runtime/interpreter/mterp/mips/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/unused.S"
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int.S b/runtime/interpreter/mterp/mips/op_ushr_int.S
deleted file mode 100644
index b95472b..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
deleted file mode 100644
index fc17778..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"srl a0, a0, a1 "}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
deleted file mode 100644
index c82cfba..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long.S b/runtime/interpreter/mterp/mips/op_ushr_long.S
deleted file mode 100644
index 2e227a9..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_long.S
+++ /dev/null
@@ -1,31 +0,0 @@
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .L${opcode}_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
deleted file mode 100644
index 9e93f34..0000000
--- a/runtime/interpreter/mterp/mips/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,27 +0,0 @@
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- GET_OPA4(t3) # t3 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t3) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .L${opcode}_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
-%break
-
-.L${opcode}_finish:
- SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
diff --git a/runtime/interpreter/mterp/mips/op_xor_int.S b/runtime/interpreter/mterp/mips/op_xor_int.S
deleted file mode 100644
index 6c23f1f..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
deleted file mode 100644
index 5ee1667..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
deleted file mode 100644
index 2af37a6..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
deleted file mode 100644
index 944ed69..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long.S b/runtime/interpreter/mterp/mips/op_xor_long.S
deleted file mode 100644
index 93f8f70..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
deleted file mode 100644
index 49f3fa4..0000000
--- a/runtime/interpreter/mterp/mips/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/runtime/interpreter/mterp/mips/other.S b/runtime/interpreter/mterp/mips/other.S
new file mode 100644
index 0000000..5002329
--- /dev/null
+++ b/runtime/interpreter/mterp/mips/other.S
@@ -0,0 +1,345 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- BBBB
+ GET_OPA(a1) # a1 <- AA
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL($helper) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(2) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(2) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+%def op_const():
+ /* const vAA, +BBBBbbbb */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+%def op_const_16():
+ /* const/16 vAA, +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+%def op_const_4():
+ /* const/4 vA, +B */
+ sll a1, rINST, 16 # a1 <- Bxxx0000
+ GET_OPA(a0) # a0 <- A+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
+ and a0, a0, 15
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, +BBBB0000 */
+ FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ sll a0, a0, 16 # a0 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, string@BBBBBBBB */
+ EXPORT_PC()
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a2, 2) # a2 <- BBBB (high)
+ GET_OPA(a1) # a1 <- AA
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
+ addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
+ move a3, rSELF
+ JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST(3) # load rINST
+ bnez v0, MterpPossibleException
+ ADVANCE(3) # advance rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_const_wide():
+ /* const-wide vAA, +HHHHhhhhBBBBbbbb */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (low middle)
+ FETCH(a2, 3) # a2 <- hhhh (high middle)
+ INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
+ FETCH(a3, 4) # a3 <- HHHH (high)
+ GET_OPA(t1) # t1 <- AA
+ INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ sra a1, a0, 31 # a1 <- ssssssss
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, +BBBBbbbb */
+ FETCH(a0, 1) # a0 <- 0000bbbb (low)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
+ sra a1, a0, 31 # a1 <- ssssssss
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, +BBBB000000000000 */
+ FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ li a0, 0 # a0 <- 00000000
+ sll a1, 16 # a1 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
+
+%def op_monitor_enter():
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_monitor_exit():
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC()
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
+ bnez v0, MterpException
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
+ .else
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
+ .endif
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
+ .else
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
+ .endif
+
+%def op_move_exception():
+ /* move-exception vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GET_OPCODE_TARGET(t0)
+ SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
+ sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ JR(t0) # jump to next instruction
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
+ .else
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
+ .endif
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
+ .else
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
+ .endif
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* move-result-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ LOAD64(a0, a1, a3) # a0/a1 <- retval.j
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ GET_OPA4(a2) # a2 <- A(+)
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 2) # a3 <- BBBB
+ FETCH(a2, 1) # a2 <- AAAA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 1) # a3 <- BBBB
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
+
+%def op_nop():
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_73():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
diff --git a/runtime/interpreter/mterp/mips/unop.S b/runtime/interpreter/mterp/mips/unop.S
deleted file mode 100644
index bc99263..0000000
--- a/runtime/interpreter/mterp/mips/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":"", "result0":"a0"}
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $preinstr # optional op
- $instr # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO($result0, t0, t1) # vA <- result0
diff --git a/runtime/interpreter/mterp/mips/unopNarrower.S b/runtime/interpreter/mterp/mips/unopNarrower.S
deleted file mode 100644
index 0196e27..0000000
--- a/runtime/interpreter/mterp/mips/unopNarrower.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"load":"LOAD64_F(fa0, fa0f, a3)"}
- /*
- * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: double-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- $load
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $instr
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
diff --git a/runtime/interpreter/mterp/mips/unopWide.S b/runtime/interpreter/mterp/mips/unopWide.S
deleted file mode 100644
index 135d9fa..0000000
--- a/runtime/interpreter/mterp/mips/unopWide.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1"}
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $preinstr # optional op
- $instr # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unopWider.S b/runtime/interpreter/mterp/mips/unopWider.S
deleted file mode 100644
index ca888ad..0000000
--- a/runtime/interpreter/mterp/mips/unopWider.S
+++ /dev/null
@@ -1,16 +0,0 @@
-%default {"preinstr":"", "result0":"a0", "result1":"a1"}
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result0/result1 = op a0".
- *
- * For: int-to-long
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- $preinstr # optional op
- $instr # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO($result0, $result1, rOBJ, t0) # vA/vA+1 <- a0/a1
diff --git a/runtime/interpreter/mterp/mips/unused.S b/runtime/interpreter/mterp/mips/unused.S
deleted file mode 100644
index ffa00be..0000000
--- a/runtime/interpreter/mterp/mips/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/mips/zcmp.S b/runtime/interpreter/mterp/mips/zcmp.S
deleted file mode 100644
index 8d3a198..0000000
--- a/runtime/interpreter/mterp/mips/zcmp.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- b${condition} a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/alt_stub.S b/runtime/interpreter/mterp/mips64/alt_stub.S
deleted file mode 100644
index 12fa84d..0000000
--- a/runtime/interpreter/mterp/mips64/alt_stub.S
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (${opnum} * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
diff --git a/runtime/interpreter/mterp/mips64/arithmetic.S b/runtime/interpreter/mterp/mips64/arithmetic.S
new file mode 100644
index 0000000..0b03e02
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/arithmetic.S
@@ -0,0 +1,458 @@
+%def binop(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG a0, a2 # a0 <- vBB
+ GET_VREG a1, a3 # a1 <- vCC
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a4 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
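As a rough C sketch of the semantics this template encodes, using div-int (chkzero=1) as the concrete op: fetch vBB and vCC, throw on a zero divisor, and store the result in vAA. Names are illustrative; the INT_MIN / -1 case is written out only because, as noted above, the hardware handles it implicitly.

    #include <stdint.h>

    static void div_int(int32_t* fp, uint8_t vAA, uint8_t vBB, uint8_t vCC,
                        void (*throw_div_by_zero)(void)) {
        int32_t a0 = fp[vBB];                /* GET_VREG a0, a2 */
        int32_t a1 = fp[vCC];                /* GET_VREG a1, a3 */
        if (a1 == 0) {                       /* chkzero: beqz a1, common_errDivideByZero */
            throw_div_by_zero();
            return;
        }
        /* INT_MIN / -1 needs no check in the handler because the CPU already
         * yields INT_MIN; spelled out here to keep the sketch well defined. */
        fp[vAA] = (a1 == -1) ? (int32_t)(0u - (uint32_t)a0) : a0 / a1;
    }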
+%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CCCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ lh a1, 2(rPC) # a1 <- sign-extended CCCC
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * CC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ lbu a3, 2(rPC) # a3 <- BB
+ lb a1, 3(rPC) # a1 <- sign-extended CC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG a0, a3 # a0 <- vBB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG $result, a2 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
+
+%def binopWide(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
+ * xor-long, shl-long, shr-long, ushr-long
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE $result, a4 # vAA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def binopWide2addr(preinstr="", result="a0", chkzero="0", instr=""):
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
+ * correctly.
+ *
+ * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
+ * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
+ * shl-long/2addr, shr-long/2addr, ushr-long/2addr
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a2 # a0 <- vA
+ GET_VREG_WIDE a1, a3 # a1 <- vB
+ .if $chkzero
+ beqz a1, common_errDivideByZero # is second operand zero?
+ .endif
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE $result, a2 # vA <- $result
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def unop(preinstr="", instr=""):
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * for: int-to-byte, int-to-char, int-to-short,
+ * not-int, neg-int
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ $preinstr # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def unopWide(preinstr="", instr=""):
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "a0 = op a0".
+ *
+ * For: not-long, neg-long
+ */
+ /* unop vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ ext a2, rINST, 8, 4 # a2 <- A
+ $preinstr # optional op
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_add_int():
+% binop(instr="addu a0, a0, a1")
+
+%def op_add_int_2addr():
+% binop2addr(instr="addu a0, a0, a1")
+
+%def op_add_int_lit16():
+% binopLit16(instr="addu a0, a0, a1")
+
+%def op_add_int_lit8():
+% binopLit8(instr="addu a0, a0, a1")
+
+%def op_add_long():
+% binopWide(instr="daddu a0, a0, a1")
+
+%def op_add_long_2addr():
+% binopWide2addr(instr="daddu a0, a0, a1")
+
+%def op_and_int():
+% binop(instr="and a0, a0, a1")
+
+%def op_and_int_2addr():
+% binop2addr(instr="and a0, a0, a1")
+
+%def op_and_int_lit16():
+% binopLit16(instr="and a0, a0, a1")
+
+%def op_and_int_lit8():
+% binopLit8(instr="and a0, a0, a1")
+
+%def op_and_long():
+% binopWide(instr="and a0, a0, a1")
+
+%def op_and_long_2addr():
+% binopWide2addr(instr="and a0, a0, a1")
+
+%def op_cmp_long():
+ /* cmp-long vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vBB
+ GET_VREG_WIDE a1, a3 # a1 <- vCC
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ slt a2, a0, a1
+ slt a0, a1, a0
+ subu a0, a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
+
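The slt/slt/subu sequence above is a branch-free three-way compare; in C terms (illustrative sketch):

    #include <stdint.h>

    /* cmp-long: -1 if vBB < vCC, 0 if equal, 1 if vBB > vCC. */
    static int32_t cmp_long(int64_t vBB, int64_t vCC) {
        int lt = vBB < vCC;                  /* slt a2, a0, a1 */
        int gt = vCC < vBB;                  /* slt a0, a1, a0 */
        return (int32_t)(gt - lt);           /* subu a0, a0, a2 */
    }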
+%def op_div_int():
+% binop(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_2addr():
+% binop2addr(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_lit16():
+% binopLit16(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_int_lit8():
+% binopLit8(instr="div a0, a0, a1", chkzero="1")
+
+%def op_div_long():
+% binopWide(instr="ddiv a0, a0, a1", chkzero="1")
+
+%def op_div_long_2addr():
+% binopWide2addr(instr="ddiv a0, a0, a1", chkzero="1")
+
+%def op_int_to_byte():
+% unop(instr="seb a0, a0")
+
+%def op_int_to_char():
+% unop(instr="and a0, a0, 0xffff")
+
+%def op_int_to_long():
+ /* int-to-long vA, vB */
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_int_to_short():
+% unop(instr="seh a0, a0")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
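int-to-long works because GET_VREG already sign-extends the 32-bit vreg to 64 bits, while long-to-int simply keeps the low word; roughly, in C (illustrative):

    #include <stdint.h>

    static int64_t int_to_long(int32_t vB) {
        return (int64_t)vB;                  /* sign-extend, stored via SET_VREG_WIDE */
    }

    static int32_t long_to_int(int64_t vB) {
        /* Keep the low 32 bits (two's complement); equivalent to a 32-bit reg move. */
        return (int32_t)(uint32_t)vB;
    }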
+%def op_mul_int():
+% binop(instr="mul a0, a0, a1")
+
+%def op_mul_int_2addr():
+% binop2addr(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit16():
+% binopLit16(instr="mul a0, a0, a1")
+
+%def op_mul_int_lit8():
+% binopLit8(instr="mul a0, a0, a1")
+
+%def op_mul_long():
+% binopWide(instr="dmul a0, a0, a1")
+
+%def op_mul_long_2addr():
+% binopWide2addr(instr="dmul a0, a0, a1")
+
+%def op_neg_int():
+% unop(instr="subu a0, zero, a0")
+
+%def op_neg_long():
+% unopWide(instr="dsubu a0, zero, a0")
+
+%def op_not_int():
+% unop(instr="nor a0, zero, a0")
+
+%def op_not_long():
+% unopWide(instr="nor a0, zero, a0")
+
+%def op_or_int():
+% binop(instr="or a0, a0, a1")
+
+%def op_or_int_2addr():
+% binop2addr(instr="or a0, a0, a1")
+
+%def op_or_int_lit16():
+% binopLit16(instr="or a0, a0, a1")
+
+%def op_or_int_lit8():
+% binopLit8(instr="or a0, a0, a1")
+
+%def op_or_long():
+% binopWide(instr="or a0, a0, a1")
+
+%def op_or_long_2addr():
+% binopWide2addr(instr="or a0, a0, a1")
+
+%def op_rem_int():
+% binop(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_2addr():
+% binop2addr(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_lit16():
+% binopLit16(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_int_lit8():
+% binopLit8(instr="mod a0, a0, a1", chkzero="1")
+
+%def op_rem_long():
+% binopWide(instr="dmod a0, a0, a1", chkzero="1")
+
+%def op_rem_long_2addr():
+% binopWide2addr(instr="dmod a0, a0, a1", chkzero="1")
+
+%def op_rsub_int():
+% binopLit16(instr="subu a0, a1, a0")
+
+%def op_rsub_int_lit8():
+% binopLit8(instr="subu a0, a1, a0")
+
+%def op_shl_int():
+% binop(instr="sll a0, a0, a1")
+
+%def op_shl_int_2addr():
+% binop2addr(instr="sll a0, a0, a1")
+
+%def op_shl_int_lit8():
+% binopLit8(instr="sll a0, a0, a1")
+
+%def op_shl_long():
+% binopWide(instr="dsll a0, a0, a1")
+
+%def op_shl_long_2addr():
+% binopWide2addr(instr="dsll a0, a0, a1")
+
+%def op_shr_int():
+% binop(instr="sra a0, a0, a1")
+
+%def op_shr_int_2addr():
+% binop2addr(instr="sra a0, a0, a1")
+
+%def op_shr_int_lit8():
+% binopLit8(instr="sra a0, a0, a1")
+
+%def op_shr_long():
+% binopWide(instr="dsra a0, a0, a1")
+
+%def op_shr_long_2addr():
+% binopWide2addr(instr="dsra a0, a0, a1")
+
+%def op_sub_int():
+% binop(instr="subu a0, a0, a1")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="subu a0, a0, a1")
+
+%def op_sub_long():
+% binopWide(instr="dsubu a0, a0, a1")
+
+%def op_sub_long_2addr():
+% binopWide2addr(instr="dsubu a0, a0, a1")
+
+%def op_ushr_int():
+% binop(instr="srl a0, a0, a1")
+
+%def op_ushr_int_2addr():
+% binop2addr(instr="srl a0, a0, a1")
+
+%def op_ushr_int_lit8():
+% binopLit8(instr="srl a0, a0, a1")
+
+%def op_ushr_long():
+% binopWide(instr="dsrl a0, a0, a1")
+
+%def op_ushr_long_2addr():
+% binopWide2addr(instr="dsrl a0, a0, a1")
+
+%def op_xor_int():
+% binop(instr="xor a0, a0, a1")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="xor a0, a0, a1")
+
+%def op_xor_int_lit8():
+% binopLit8(instr="xor a0, a0, a1")
+
+%def op_xor_long():
+% binopWide(instr="xor a0, a0, a1")
+
+%def op_xor_long_2addr():
+% binopWide2addr(instr="xor a0, a0, a1")
diff --git a/runtime/interpreter/mterp/mips64/array.S b/runtime/interpreter/mterp/mips64/array.S
new file mode 100644
index 0000000..9d97f0a
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/array.S
@@ -0,0 +1,241 @@
+%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ * NOTE: assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if $shift
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ $load a2, $data_offset(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
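The template amounts to a null check, an unsigned bounds check (which also rejects negative indices), and a load scaled by the element width. A C sketch for the int case; the array layout below is illustrative, not the real mirror::Array definition.

    #include <stddef.h>
    #include <stdint.h>

    struct IntArray { int32_t length; int32_t data[]; };    /* illustrative layout */

    static void aget_int(const struct IntArray* array, uint32_t index,
                         int32_t* vAA, void (*throw_npe)(void),
                         void (*throw_aioobe)(void)) {
        if (array == NULL) { throw_npe(); return; }          /* common_errNullObject */
        if (index >= (uint32_t)array->length) {              /* unsigned compare */
            throw_aioobe();                                   /* common_errArrayIndex */
            return;
        }
        *vAA = array->data[index];                            /* $load at $data_offset */
    }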
+%def op_aget_boolean():
+% op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+ /*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ .extern artAGetObjectFromMterp
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ jal artAGetObjectFromMterp # (array, index)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ srl a4, rINST, 8 # a4 <- AA
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ SET_VREG_OBJECT v0, a4 # vAA <- v0
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_aget_short():
+% op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ */
+ /* aget-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
+ dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a2, a4 # vAA <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
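aget-wide reads the element as two 32-bit words and merges them with dinsu; the combine step in C (illustrative, little-endian):

    #include <stdint.h>

    /* Mirrors "lw a2, off(a0); lw a3, off+4(a0); dinsu a2, a3, 32, 32". */
    static uint64_t combine_halves(uint32_t lo, uint32_t hi) {
        return (uint64_t)lo | ((uint64_t)hi << 32);
    }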
+%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ * NOTE: this assumes data offset for arrays is the same for all non-wide types.
+ * If this changes, specialize.
+ */
+ /* op vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ .if $shift
+ # [d]lsa does not support shift count of 0.
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
+ .else
+ daddu a0, a1, a0 # a0 <- arrayObj + index*width
+ .endif
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a2, a4 # a2 <- vAA
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ $store a2, $data_offset(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_aput_boolean():
+% op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ .extern MterpAputObject
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpAputObject
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_aput_short():
+% op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ srl a4, rINST, 8 # a4 <- AA
+ GET_VREG_U a0, a2 # a0 <- vBB (array object)
+ GET_VREG a1, a3 # a1 <- vCC (requested index)
+ beqz a0, common_errNullObject # bail if null array object
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
+ dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
+ GET_VREG_WIDE a2, a4 # a2 <- vAA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
+ dsrl32 a2, a2, 0
+ sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_array_length():
+ /*
+ * Return the length of an array.
+ */
+ srl a1, rINST, 12 # a1 <- B
+ GET_VREG_U a0, a1 # a0 <- vB (object ref)
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a0, common_errNullObject # bail if null array object
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a3, a2 # vA <- length
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ .extern MterpFillArrayData
+ EXPORT_PC
+ lh a1, 2(rPC) # a1 <- bbbb (lo)
+ lh a0, 4(rPC) # a0 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a1, a0, 16, 16 # a1 <- BBBBbbbb
+ GET_VREG_U a0, a3 # a0 <- vAA (array object)
+ dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
+ jal MterpFillArrayData # (obj, payload)
+ beqzc v0, MterpPossibleException # exception?
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
+ .extern $helper
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rSELF
+ jal $helper
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class//CCCC */
+ .extern MterpNewArray
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ move a3, rSELF
+ jal MterpNewArray
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S
deleted file mode 100644
index c2bca91..0000000
--- a/runtime/interpreter/mterp/mips64/bincmp.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binop.S b/runtime/interpreter/mterp/mips64/binop.S
deleted file mode 100644
index fab48b7..0000000
--- a/runtime/interpreter/mterp/mips64/binop.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG $result, a4 # vAA <- $result
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binop2addr.S b/runtime/interpreter/mterp/mips64/binop2addr.S
deleted file mode 100644
index 1ae73f5..0000000
--- a/runtime/interpreter/mterp/mips64/binop2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG $result, a2 # vA <- $result
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopLit16.S b/runtime/interpreter/mterp/mips64/binopLit16.S
deleted file mode 100644
index 9257758..0000000
--- a/runtime/interpreter/mterp/mips64/binopLit16.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG $result, a2 # vA <- $result
- GOTO_OPCODE v0 # jump to next instruction
-
diff --git a/runtime/interpreter/mterp/mips64/binopLit8.S b/runtime/interpreter/mterp/mips64/binopLit8.S
deleted file mode 100644
index f4a0bba..0000000
--- a/runtime/interpreter/mterp/mips64/binopLit8.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG $result, a2 # vAA <- $result
- GOTO_OPCODE v0 # jump to next instruction
-
diff --git a/runtime/interpreter/mterp/mips64/binopWide.S b/runtime/interpreter/mterp/mips64/binopWide.S
deleted file mode 100644
index 732f0d6..0000000
--- a/runtime/interpreter/mterp/mips64/binopWide.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE $result, a4 # vAA <- $result
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/binopWide2addr.S b/runtime/interpreter/mterp/mips64/binopWide2addr.S
deleted file mode 100644
index 45d8d82..0000000
--- a/runtime/interpreter/mterp/mips64/binopWide2addr.S
+++ /dev/null
@@ -1,30 +0,0 @@
-%default {"preinstr":"", "result":"a0", "chkzero":"0"}
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if $chkzero
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- $preinstr # optional op
- $instr # $result <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE $result, a2 # vA <- $result
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/const.S b/runtime/interpreter/mterp/mips64/const.S
deleted file mode 100644
index 2ec1173..0000000
--- a/runtime/interpreter/mterp/mips64/const.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal $helper # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/control_flow.S b/runtime/interpreter/mterp/mips64/control_flow.S
new file mode 100644
index 0000000..457b938
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/control_flow.S
@@ -0,0 +1,217 @@
+%def bincmp(condition=""):
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-le" you would use "le".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
+ GET_VREG a0, a2 # a0 <- vA
+ GET_VREG a1, a3 # a1 <- vB
+ b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
+ li v0, JIT_CHECK_OSR # possible OSR re-entry?
+ beqc rPROFILE, v0, .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def zcmp(condition=""):
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "condition"
+ * fragment that specifies the comparison to perform, e.g. for
+ * "if-lez" you would use "le".
+ *
+ * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
+ GET_VREG a0, a2 # a0 <- vAA
+ b${condition}zc a0, MterpCommonTakenBranchNoFlags
+ li v0, JIT_CHECK_OSR # possible OSR re-entry?
+ beqc rPROFILE, v0, .L_check_not_taken_osr
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
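Both templates implement the standard dex conditional-branch semantics: on a taken branch, control goes to MterpCommonTakenBranchNoFlags with the sign-extended code-unit offset in rINST (which also handles the hotness/OSR bookkeeping); otherwise execution falls through to the next instruction. A C sketch of the if-cmp case (illustrative; profiling omitted):

    #include <stdint.h>

    /* 'pc' indexes 16-bit code units; 'cmp' stands in for eq/ne/lt/ge/gt/le. */
    static const uint16_t* if_cmp(const uint16_t* pc, int32_t vA, int32_t vB,
                                  int (*cmp)(int32_t, int32_t)) {
        int16_t offset = (int16_t)pc[1];     /* sign-extended CCCC */
        if (cmp(vA, vB)) {
            return pc + offset;              /* taken branch, in code units */
        }
        return pc + 2;                       /* not taken: next instruction */
    }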
+%def op_goto():
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ srl rINST, rINST, 8
+ seb rINST, rINST # rINST <- offset (sign-extended AA)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_16():
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_goto_32():
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ lh rINST, 2(rPC) # rINST <- aaaa (low)
+ lh a1, 4(rPC) # a1 <- AAAA (high)
+ ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa)
+ b MterpCommonTakenBranchNoFlags
+
+%def op_if_eq():
+% bincmp(condition="eq")
+
+%def op_if_eqz():
+% zcmp(condition="eq")
+
+%def op_if_ge():
+% bincmp(condition="ge")
+
+%def op_if_gez():
+% zcmp(condition="ge")
+
+%def op_if_gt():
+% bincmp(condition="gt")
+
+%def op_if_gtz():
+% zcmp(condition="gt")
+
+%def op_if_le():
+% bincmp(condition="le")
+
+%def op_if_lez():
+% zcmp(condition="le")
+
+%def op_if_lt():
+% bincmp(condition="lt")
+
+%def op_if_ltz():
+% zcmp(condition="lt")
+
+%def op_if_ne():
+% bincmp(condition="ne")
+
+%def op_if_nez():
+% zcmp(condition="ne")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBBBBBB */
+ .extern $func
+ lh a0, 2(rPC) # a0 <- bbbb (lo)
+ lh a1, 4(rPC) # a1 <- BBBB (hi)
+ srl a3, rINST, 8 # a3 <- AA
+ ins a0, a1, 16, 16 # a0 <- BBBBbbbb
+ GET_VREG a1, a3 # a1 <- vAA
+ dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
+ jal $func # v0 <- code-unit branch offset
+ move rINST, v0
+ b MterpCommonTakenBranchNoFlags
+
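The helper receives a pointer to the switch payload (rPC plus the signed 32-bit offset, in code units) and the tested value, and returns a code-unit branch offset. For packed-switch this is a bounds-checked table lookup; the payload layout sketched below follows the Dalvik bytecode format (ident, size, first_key, then size 32-bit targets) and illustrates what the helper does, not its actual code.

    #include <stdint.h>
    #include <string.h>

    static int32_t do_packed_switch(const uint16_t* payload, int32_t value) {
        uint16_t size = payload[1];                    /* payload[0] is the ident */
        int32_t first_key;
        memcpy(&first_key, payload + 2, sizeof(first_key));
        uint32_t index = (uint32_t)value - (uint32_t)first_key;
        if (index >= size) {
            return 3;      /* no match: fall through past the 3-code-unit instruction */
        }
        int32_t target;
        memcpy(&target, payload + 4 + 2 * (size_t)index, sizeof(target));
        return target;                                  /* branch offset in code units */
    }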
+%def op_return(instr="GET_VREG"):
+ /*
+ * Return a 32-bit value.
+ *
+ * for: return (sign-extend), return-object (zero-extend)
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ $instr a0, a2 # a0 <- vAA
+ b MterpReturn
+
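The return path issues the constructor fence, runs a suspend check only when the thread has a pending suspend or checkpoint request, and then hands the 32-bit value to MterpReturn, sign-extended for return and zero-extended for return-object (GET_VREG vs GET_VREG_U). A C sketch with illustrative thread/flag names:

    #include <stdint.h>

    struct Thread { uint32_t flags; };                       /* illustrative */
    #define SUSPEND_OR_CHECKPOINT_REQUEST 0x3u               /* illustrative mask */

    static int64_t return_value(struct Thread* self, const int32_t* fp, uint32_t vAA,
                                int is_object, void (*fence)(void),
                                void (*suspend_check)(struct Thread*)) {
        fence();                                             /* MterpThreadFenceForConstructor */
        if (self->flags & SUSPEND_OR_CHECKPOINT_REQUEST) {
            suspend_check(self);                             /* MterpSuspendCheck(self) */
        }
        return is_object ? (int64_t)(uint32_t)fp[vAA]        /* GET_VREG_U */
                         : (int64_t)fp[vAA];                 /* GET_VREG   */
    }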
+%def op_return_object():
+% op_return(instr="GET_VREG_U")
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
+
+%def op_return_void_no_barrier():
+ .extern MterpSuspendCheck
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ li a0, 0
+ b MterpReturn
+
+%def op_return_wide():
+ /*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ .extern MterpSuspendCheck
+ jal MterpThreadFenceForConstructor
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ move a0, rSELF
+ and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ beqzc ra, 1f
+ jal MterpSuspendCheck # (self)
+1:
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a2 # a0 <- vAA
+ b MterpReturn
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (exception object)
+ beqzc a0, common_errNullObject
+ sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
+ b MterpException
diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S
deleted file mode 100644
index ed965aa..0000000
--- a/runtime/interpreter/mterp/mips64/entry.S
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
- .set reorder
-
- .text
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-ExecuteMterpImpl:
- .cfi_startproc
- .cpsetup t9, t8, ExecuteMterpImpl
-
- .cfi_def_cfa sp, 0
- daddu sp, sp, -STACK_SIZE
- .cfi_adjust_cfa_offset STACK_SIZE
-
- sd t8, STACK_OFFSET_GP(sp)
- .cfi_rel_offset 28, STACK_OFFSET_GP
- sd ra, STACK_OFFSET_RA(sp)
- .cfi_rel_offset 31, STACK_OFFSET_RA
-
- sd s0, STACK_OFFSET_S0(sp)
- .cfi_rel_offset 16, STACK_OFFSET_S0
- sd s1, STACK_OFFSET_S1(sp)
- .cfi_rel_offset 17, STACK_OFFSET_S1
- sd s2, STACK_OFFSET_S2(sp)
- .cfi_rel_offset 18, STACK_OFFSET_S2
- sd s3, STACK_OFFSET_S3(sp)
- .cfi_rel_offset 19, STACK_OFFSET_S3
- sd s4, STACK_OFFSET_S4(sp)
- .cfi_rel_offset 20, STACK_OFFSET_S4
- sd s5, STACK_OFFSET_S5(sp)
- .cfi_rel_offset 21, STACK_OFFSET_S5
- sd s6, STACK_OFFSET_S6(sp)
- .cfi_rel_offset 22, STACK_OFFSET_S6
-
- /* Remember the return register */
- sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
- lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- dlsa rREFS, v0, rFP, 2
- lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
- dlsa rPC, v0, a1, 1
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* Set up for backwards branches & osr profiling */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- jal MterpSetUpHotnessCountdown
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/mips64/fallback.S b/runtime/interpreter/mterp/mips64/fallback.S
deleted file mode 100644
index 560b994..0000000
--- a/runtime/interpreter/mterp/mips64/fallback.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Transfer stub to alternate interpreter */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/fbinop.S b/runtime/interpreter/mterp/mips64/fbinop.S
deleted file mode 100644
index f19dd1c..0000000
--- a/runtime/interpreter/mterp/mips64/fbinop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {}
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- $instr # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinop2addr.S b/runtime/interpreter/mterp/mips64/fbinop2addr.S
deleted file mode 100644
index 2e2cd7e..0000000
--- a/runtime/interpreter/mterp/mips64/fbinop2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {}
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- $instr # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide.S b/runtime/interpreter/mterp/mips64/fbinopWide.S
deleted file mode 100644
index 8915c94..0000000
--- a/runtime/interpreter/mterp/mips64/fbinopWide.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {}
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- $instr # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
deleted file mode 100644
index a3f4eaa..0000000
--- a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {}
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- $instr # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmp.S b/runtime/interpreter/mterp/mips64/fcmp.S
deleted file mode 100644
index 2e1a3e4..0000000
--- a/runtime/interpreter/mterp/mips64/fcmp.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- cmp.eq.s f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- cmp.lt.s f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcmpWide.S b/runtime/interpreter/mterp/mips64/fcmpWide.S
deleted file mode 100644
index 2a3a341..0000000
--- a/runtime/interpreter/mterp/mips64/fcmpWide.S
+++ /dev/null
@@ -1,32 +0,0 @@
-%default {}
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- cmp.eq.d f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if $gt_bias
- cmp.lt.d f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtFooter.S b/runtime/interpreter/mterp/mips64/fcvtFooter.S
deleted file mode 100644
index 06e9507..0000000
--- a/runtime/interpreter/mterp/mips64/fcvtFooter.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG$suffix $valreg, a1
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/fcvtHeader.S b/runtime/interpreter/mterp/mips64/fcvtHeader.S
deleted file mode 100644
index 8742e42..0000000
--- a/runtime/interpreter/mterp/mips64/fcvtHeader.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
- * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG$suffix $valreg, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
diff --git a/runtime/interpreter/mterp/mips64/floating_point.S b/runtime/interpreter/mterp/mips64/floating_point.S
new file mode 100644
index 0000000..1132a09
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/floating_point.S
@@ -0,0 +1,382 @@
+%def fbinop(instr=""):
+ /*
+ * Generic 32-bit floating-point operation.
+ *
+ * For: add-float, sub-float, mul-float, div-float.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fbinop2addr(instr=""):
+ /*
+ * Generic 32-bit "/2addr" floating-point operation.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f0, a2 # f0 <- vA
+ GET_VREG_FLOAT f1, a3 # f1 <- vB
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fbinopWide(instr=""):
+ /*
+ * Generic 64-bit floating-point operation.
+ *
+ * For: add-double, sub-double, mul-double, div-double.
+ * form: <op> f0, f0, f1
+ */
+ /* binop vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fbinopWide2addr(instr=""):
+ /*:
+ * Generic 64-bit "/2addr" floating-point operation.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
+ * form: <op> f0, f0, f1
+ */
+ /* binop/2addr vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f0, a2 # f0 <- vA
+ GET_VREG_DOUBLE f1, a3 # f1 <- vB
+ $instr # f0 <- f0 op f1
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fcmp(gt_bias=""):
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f0, a2 # f0 <- vBB
+ GET_VREG_FLOAT f1, a3 # f1 <- vCC
+ cmp.eq.s f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.s f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.s f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fcmpWide(gt_bias=""):
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ srl a4, rINST, 8 # a4 <- AA
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f0, a2 # f0 <- vBB
+ GET_VREG_DOUBLE f1, a3 # f1 <- vCC
+ cmp.eq.d f2, f0, f1
+ li a0, 0
+ bc1nez f2, 1f # done if vBB == vCC (ordered)
+ .if $gt_bias
+ cmp.lt.d f2, f0, f1
+ li a0, -1
+ bc1nez f2, 1f # done if vBB < vCC (ordered)
+ li a0, 1 # vBB > vCC or unordered
+ .else
+ cmp.lt.d f2, f1, f0
+ li a0, 1
+ bc1nez f2, 1f # done if vBB > vCC (ordered)
+ li a0, -1 # vBB < vCC or unordered
+ .endif
+1:
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a4 # vAA <- a0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fcvtFooter(suffix="", valreg=""):
+ /*
+ * Stores a specified register containing the result of conversion
+ * from or to a floating-point type and jumps to the next instruction.
+ *
+ * Expects a1 to contain the destination Dalvik register number.
+ * a1 is set up by fcvtHeader.S.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ *
+ * Note that this file can't be included after a break in other files
+ * and in those files its contents appear as a copy.
+ * See: float-to-int, float-to-long, double-to-int, double-to-long.
+ */
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG$suffix $valreg, a1
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def fcvtHeader(suffix="", valreg=""):
+ /*
+ * Loads a specified register from vB. Used primarily for conversions
+ * from or to a floating-point type.
+ *
+     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
+ * store the result in vA and jump to the next instruction.
+ *
+ * For: int-to-float, int-to-double, long-to-float, long-to-double,
+ * float-to-int, float-to-long, float-to-double, double-to-int,
+ * double-to-long, double-to-float, neg-float, neg-double.
+ */
+ ext a1, rINST, 8, 4 # a1 <- A
+ srl a2, rINST, 12 # a2 <- B
+ GET_VREG$suffix $valreg, a2
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+
+%def op_add_double():
+% fbinopWide(instr="add.d f0, f0, f1")
+
+%def op_add_double_2addr():
+% fbinopWide2addr(instr="add.d f0, f0, f1")
+
+%def op_add_float():
+% fbinop(instr="add.s f0, f0, f1")
+
+%def op_add_float_2addr():
+% fbinop2addr(instr="add.s f0, f0, f1")
+
+%def op_cmpg_double():
+% fcmpWide(gt_bias="1")
+
+%def op_cmpg_float():
+% fcmp(gt_bias="1")
+
+%def op_cmpl_double():
+% fcmpWide(gt_bias="0")
+
+%def op_cmpl_float():
+% fcmp(gt_bias="0")
+
+%def op_div_double():
+% fbinopWide(instr="div.d f0, f0, f1")
+
+%def op_div_double_2addr():
+% fbinopWide2addr(instr="div.d f0, f0, f1")
+
+%def op_div_float():
+% fbinop(instr="div.s f0, f0, f1")
+
+%def op_div_float_2addr():
+% fbinop2addr(instr="div.s f0, f0, f1")
+
+%def op_double_to_float():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ cvt.s.d f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_double_to_int():
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ trunc.w.d f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_double_to_long():
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ trunc.l.d f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_float_to_double():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ cvt.d.s f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_float_to_int():
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ trunc.w.s f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_float_to_long():
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ trunc.l.s f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_int_to_double():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ cvt.d.w f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_int_to_float():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ cvt.s.w f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_long_to_double():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ cvt.d.l f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_long_to_float():
+ /*
+ * Conversion from or to floating-point happens in a floating-point register.
+ * Therefore we load the input and store the output into or from a
+ * floating-point register irrespective of the type.
+ */
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ cvt.s.l f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_mul_double():
+% fbinopWide(instr="mul.d f0, f0, f1")
+
+%def op_mul_double_2addr():
+% fbinopWide2addr(instr="mul.d f0, f0, f1")
+
+%def op_mul_float():
+% fbinop(instr="mul.s f0, f0, f1")
+
+%def op_mul_float_2addr():
+% fbinop2addr(instr="mul.s f0, f0, f1")
+
+%def op_neg_double():
+% fcvtHeader(suffix="_DOUBLE", valreg="f0")
+ neg.d f0, f0
+% fcvtFooter(suffix="_DOUBLE", valreg="f0")
+
+%def op_neg_float():
+% fcvtHeader(suffix="_FLOAT", valreg="f0")
+ neg.s f0, f0
+% fcvtFooter(suffix="_FLOAT", valreg="f0")
+
+%def op_rem_double():
+ /* rem-double vAA, vBB, vCC */
+ .extern fmod
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_DOUBLE f12, a2 # f12 <- vBB
+ GET_VREG_DOUBLE f13, a3 # f13 <- vCC
+ jal fmod # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_rem_double_2addr():
+ /* rem-double/2addr vA, vB */
+ .extern fmod
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_DOUBLE f12, a2 # f12 <- vA
+ GET_VREG_DOUBLE f13, a3 # f13 <- vB
+ jal fmod # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_DOUBLE f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_rem_float():
+ /* rem-float vAA, vBB, vCC */
+ .extern fmodf
+ lbu a2, 2(rPC) # a2 <- BB
+ lbu a3, 3(rPC) # a3 <- CC
+ GET_VREG_FLOAT f12, a2 # f12 <- vBB
+ GET_VREG_FLOAT f13, a3 # f13 <- vCC
+ jal fmodf # f0 <- f12 op f13
+ srl a4, rINST, 8 # a4 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a4 # vAA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_rem_float_2addr():
+ /* rem-float/2addr vA, vB */
+ .extern fmodf
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ GET_VREG_FLOAT f12, a2 # f12 <- vA
+ GET_VREG_FLOAT f13, a3 # f13 <- vB
+ jal fmodf # f0 <- f12 op f13
+ ext a2, rINST, 8, 4 # a2 <- A
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_FLOAT f0, a2 # vA <- f0
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_sub_double():
+% fbinopWide(instr="sub.d f0, f0, f1")
+
+%def op_sub_double_2addr():
+% fbinopWide2addr(instr="sub.d f0, f0, f1")
+
+%def op_sub_float():
+% fbinop(instr="sub.s f0, f0, f1")
+
+%def op_sub_float_2addr():
+% fbinop2addr(instr="sub.s f0, f0, f1")
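
The fcmp and fcmpWide templates above encode the Dalvik cmpl/cmpg bias for
unordered operands: equality gives 0, an ordered less-than or greater-than
gives -1 or 1, and a NaN operand falls through to the biased default. As a
reference point, here is a minimal C model of the value left in vAA (an
illustration with an invented function name, not ART code):

    #include <math.h>

    /* Model of the cmpl-/cmpg-float result computed by fcmp above:
     * 0 if vBB == vCC, 1 if vBB > vCC, -1 if vBB < vCC; when either
     * operand is NaN (unordered), gt_bias selects the result:
     * cmpg (gt_bias=1) yields 1, cmpl (gt_bias=0) yields -1. */
    static int dalvik_fcmp_model(float vBB, float vCC, int gt_bias) {
        if (vBB == vCC) return 0;                              /* cmp.eq.s path  */
        if (isnan(vBB) || isnan(vCC)) return gt_bias ? 1 : -1; /* unordered      */
        return (vBB < vCC) ? -1 : 1;                           /* cmp.lt.s paths */
    }

The same model applies to fcmpWide with double operands and the cmp.eq.d /
cmp.lt.d instructions.
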
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
deleted file mode 100644
index 779b1fb..0000000
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
- .extern MterpLogDivideByZeroException
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogArrayIndexException
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogNullObjectException
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqzc a0, MterpFallback # If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
- .extern MterpHandleException
- .extern MterpShouldSwitchInterpreters
-MterpException:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpHandleException # (self, shadow_frame)
- beqzc v0, MterpExceptionReturn # no local catch, back to caller.
- ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lwu a1, OFF_FP_DEX_PC(rFP)
- REFRESH_IBASE
- dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- bgtzc rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
- li v0, JIT_CHECK_OSR
- beqc rPROFILE, v0, .L_osr_check
- bltc rPROFILE, v0, .L_resume_backward_branch
- dsubu rPROFILE, 1
- beqzc rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnezc ra, .L_suspend_request_pending
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- move a0, rSELF
- jal MterpSuspendCheck # (self)
- bnezc v0, MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_no_count_backwards:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- bnec rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- beqc rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- ld a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST # rINST contains offset
- jal MterpLogOSR
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
- .extern MterpLogFallback
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogFallback
-#endif
-MterpCommonFallback:
- li v0, 0 # signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA. Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
- ld a2, OFF_FP_RESULT_REGISTER(rFP)
- sd a0, 0(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
-.L_pop_and_return:
- ld s6, STACK_OFFSET_S6(sp)
- .cfi_restore 22
- ld s5, STACK_OFFSET_S5(sp)
- .cfi_restore 21
- ld s4, STACK_OFFSET_S4(sp)
- .cfi_restore 20
- ld s3, STACK_OFFSET_S3(sp)
- .cfi_restore 19
- ld s2, STACK_OFFSET_S2(sp)
- .cfi_restore 18
- ld s1, STACK_OFFSET_S1(sp)
- .cfi_restore 17
- ld s0, STACK_OFFSET_S0(sp)
- .cfi_restore 16
-
- ld ra, STACK_OFFSET_RA(sp)
- .cfi_restore 31
-
- ld t8, STACK_OFFSET_GP(sp)
- .cpreturn
- .cfi_restore 28
-
- .set noreorder
- jr ra
- daddu sp, sp, STACK_SIZE
- .cfi_adjust_cfa_offset -STACK_SIZE
-
- .cfi_endproc
- .set reorder
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
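
The MterpCommonTakenBranchNoFlags block deleted from footer.S here (and
re-emitted by the footer() template in main.S below) decides, on every taken
backward branch, whether to simply resume, run an OSR check, or flush the
hotness batch. A small C sketch of that decision; JIT_CHECK_OSR is assumed to
be -1 for illustration only, its real value comes from asm_support.h and is
not part of this patch:

    /* Sketch of the backward-branch profiling decision above.  Values of
     * rPROFILE below JIT_CHECK_OSR mean hotness tracking is disabled. */
    #define JIT_CHECK_OSR (-1)   /* assumed value, see asm_support.h */

    enum branch_action { RESUME_BRANCH, OSR_CHECK, REPORT_HOTNESS_BATCH };

    static enum branch_action taken_backward_branch(long *rPROFILE) {
        if (*rPROFILE == JIT_CHECK_OSR) return OSR_CHECK;       /* beqc          */
        if (*rPROFILE <  JIT_CHECK_OSR) return RESUME_BRANCH;   /* bltc          */
        if (--*rPROFILE == 0) return REPORT_HOTNESS_BATCH;      /* dsubu + beqzc */
        return RESUME_BRANCH;
    }
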
diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S
deleted file mode 100644
index 7e1446c..0000000
--- a/runtime/interpreter/mterp/mips64/header.S
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $$0 /* always zero */
-#define AT $$at /* assembler temp */
-#define v0 $$2 /* return value */
-#define v1 $$3
-#define a0 $$4 /* argument registers */
-#define a1 $$5
-#define a2 $$6
-#define a3 $$7
-#define a4 $$8 /* expanded register arguments */
-#define a5 $$9
-#define a6 $$10
-#define a7 $$11
-#define ta0 $$8 /* alias */
-#define ta1 $$9
-#define ta2 $$10
-#define ta3 $$11
-#define t0 $$12 /* temp registers (not saved across subroutine calls) */
-#define t1 $$13
-#define t2 $$14
-#define t3 $$15
-
-#define s0 $$16 /* saved across subroutine calls (callee saved) */
-#define s1 $$17
-#define s2 $$18
-#define s3 $$19
-#define s4 $$20
-#define s5 $$21
-#define s6 $$22
-#define s7 $$23
-#define t8 $$24 /* two more temp registers */
-#define t9 $$25
-#define k0 $$26 /* kernel temporary */
-#define k1 $$27
-#define gp $$28 /* global pointer */
-#define sp $$29 /* stack pointer */
-#define s8 $$30 /* one more callee saved */
-#define ra $$31 /* return address */
-
-#define f0 $$f0
-#define f1 $$f1
-#define f2 $$f2
-#define f3 $$f3
-#define f12 $$f12
-#define f13 $$f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
- bgec \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
- bltc \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rINST first 16-bit code unit of current instruction
- s4 rIBASE interpreted instruction base pointer, used for computed goto
- s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s6 rPROFILE jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rINST s3
-#define rIBASE s4
-#define rREFS s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- sd rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- lhu rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- daddu rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- daddu rPC, rPC, \reg
- FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ADVANCE \count
- FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- lhu rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
- .set noat
- sll AT, \reg, 7
- daddu AT, rIBASE, AT
- jic AT, 0
- .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_U reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwu \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- lw AT, 4(AT)
- dinsu \reg, AT, 32, 32
- .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- lw AT, 4(AT)
- mthc1 AT, \reg
- .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- drotr32 \reg, \reg, 0
- sw \reg, 4(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- mfhc1 \vreg, \reg
- sw \vreg, 4(AT)
- .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE 80 /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
-#define LONG_MIN 0x8000000000000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
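
The vreg accessor macros deleted from header.S (and re-created by the header()
template in main.S) treat the shadow frame as two parallel arrays of 32-bit
slots: payload slots reached through rFP and reference slots reached through
rREFS, with 64-bit values split across two adjacent slots to avoid unaligned
accesses. A rough C model of that layout, with invented names:

    #include <stdint.h>

    /* vregs[] models the payload slots addressed via rFP, refs[] the parallel
     * reference slots addressed via rREFS; a wide value uses slots v and v+1. */
    struct vreg_file {
        uint32_t *vregs;   /* rFP   */
        uint32_t *refs;    /* rREFS */
    };

    static void set_vreg_wide(struct vreg_file *f, unsigned v, uint64_t value) {
        f->vregs[v]     = (uint32_t)value;          /* sw (low 32 bits)           */
        f->vregs[v + 1] = (uint32_t)(value >> 32);  /* drotr32 + sw (high 32)     */
        f->refs[v]      = 0;                        /* wide slots never hold refs */
        f->refs[v + 1]  = 0;
    }

    static uint64_t get_vreg_wide(const struct vreg_file *f, unsigned v) {
        return (uint64_t)f->vregs[v] | ((uint64_t)f->vregs[v + 1] << 32);
    }
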
diff --git a/runtime/interpreter/mterp/mips64/instruction_end.S b/runtime/interpreter/mterp/mips64/instruction_end.S
deleted file mode 100644
index 32c725c..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_end_alt.S b/runtime/interpreter/mterp/mips64/instruction_end_alt.S
deleted file mode 100644
index f90916f..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end_alt.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_end_sister.S b/runtime/interpreter/mterp/mips64/instruction_end_sister.S
deleted file mode 100644
index c5f4886..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_end_sister.S
+++ /dev/null
@@ -1,3 +0,0 @@
-
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
diff --git a/runtime/interpreter/mterp/mips64/instruction_start.S b/runtime/interpreter/mterp/mips64/instruction_start.S
deleted file mode 100644
index 8874c20..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/mips64/instruction_start_alt.S b/runtime/interpreter/mterp/mips64/instruction_start_alt.S
deleted file mode 100644
index 0c9ffdb..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start_alt.S
+++ /dev/null
@@ -1,4 +0,0 @@
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
diff --git a/runtime/interpreter/mterp/mips64/instruction_start_sister.S b/runtime/interpreter/mterp/mips64/instruction_start_sister.S
deleted file mode 100644
index 2ec51f7..0000000
--- a/runtime/interpreter/mterp/mips64/instruction_start_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
index be647b6..c2967cf 100644
--- a/runtime/interpreter/mterp/mips64/invoke.S
+++ b/runtime/interpreter/mterp/mips64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -18,3 +18,93 @@
bnezc v0, MterpFallback
GET_INST_OPCODE v0
GOTO_OPCODE v0
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ .extern MterpShouldSwitchInterpreters
+ EXPORT_PC
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ move a3, rINST
+ jal $helper
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 4
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips64/invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
deleted file mode 100644
index fa82083..0000000
--- a/runtime/interpreter/mterp/mips64/invoke_polymorphic.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal $helper
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 4
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
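
The invoke-polymorphic wrapper deleted here (now the invoke_polymorphic
template in invoke.S above) advances by four 16-bit code units because the
instruction carries an extra proto@HHHH operand. FETCH_ADVANCE_INST counts
code units and the ADVANCE macro turns the count into a byte offset; a trivial
C restatement of that bookkeeping, with illustrative names:

    #include <stdint.h>

    /* rPC walks an array of 16-bit code units, so advancing by "count" code
     * units is rPC += count elements (count * 2 bytes), then rINST reloads. */
    static uint16_t fetch_advance_inst(const uint16_t **rPC, unsigned count) {
        *rPC += count;     /* ADVANCE: daddu rPC, rPC, count * 2 */
        return **rPC;      /* FETCH_INST: lhu rINST, 0(rPC)      */
    }
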
diff --git a/runtime/interpreter/mterp/mips64/main.S b/runtime/interpreter/mterp/mips64/main.S
new file mode 100644
index 0000000..92bddb0
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/main.S
@@ -0,0 +1,753 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define zero $$0 /* always zero */
+#define AT $$at /* assembler temp */
+#define v0 $$2 /* return value */
+#define v1 $$3
+#define a0 $$4 /* argument registers */
+#define a1 $$5
+#define a2 $$6
+#define a3 $$7
+#define a4 $$8 /* expanded register arguments */
+#define a5 $$9
+#define a6 $$10
+#define a7 $$11
+#define ta0 $$8 /* alias */
+#define ta1 $$9
+#define ta2 $$10
+#define ta3 $$11
+#define t0 $$12 /* temp registers (not saved across subroutine calls) */
+#define t1 $$13
+#define t2 $$14
+#define t3 $$15
+
+#define s0 $$16 /* saved across subroutine calls (callee saved) */
+#define s1 $$17
+#define s2 $$18
+#define s3 $$19
+#define s4 $$20
+#define s5 $$21
+#define s6 $$22
+#define s7 $$23
+#define t8 $$24 /* two more temp registers */
+#define t9 $$25
+#define k0 $$26 /* kernel temporary */
+#define k1 $$27
+#define gp $$28 /* global pointer */
+#define sp $$29 /* stack pointer */
+#define s8 $$30 /* one more callee saved */
+#define ra $$31 /* return address */
+
+#define f0 $$f0
+#define f1 $$f1
+#define f2 $$f2
+#define f3 $$f3
+#define f12 $$f12
+#define f13 $$f13
+
+/*
+ * It looks like the GNU assembler currently does not support the blec and bgtc
+ * idioms, which should translate into bgec and bltc respectively with swapped
+ * left and right register operands.
+ * TODO: remove these macros when the assembler is fixed.
+ */
+.macro blec lreg, rreg, target
+ bgec \rreg, \lreg, \target
+.endm
+.macro bgtc lreg, rreg, target
+ bltc \rreg, \lreg, \target
+.endm
+
+/*
+Mterp and MIPS64 notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rINST first 16-bit code unit of current instruction
+ s4 rIBASE interpreted instruction base pointer, used for computed goto
+ s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
+ s6 rPROFILE jit profile hotness countdown
+*/
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
+#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
+#define rFP s1
+#define rSELF s2
+#define rINST s3
+#define rIBASE s4
+#define rREFS s5
+#define rPROFILE s6
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+#define MTERP_PROFILE_BRANCHES 1
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ sd rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+.macro FETCH_INST
+ lhu rINST, 0(rPC)
+.endm
+
+/* Advance rPC by some number of code units. */
+.macro ADVANCE count
+ daddu rPC, rPC, (\count) * 2
+.endm
+
+/*
+ * Fetch the next instruction from an offset specified by _reg and advance xPC.
+ * xPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
+ *
+ */
+.macro FETCH_ADVANCE_INST_RB reg
+ daddu rPC, rPC, \reg
+ FETCH_INST
+.endm
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction.
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC.)
+ */
+.macro FETCH_ADVANCE_INST count
+ ADVANCE \count
+ FETCH_INST
+.endm
+
+/*
+ * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
+ * rINST ahead of possible exception point. Be sure to manually advance rPC
+ * later.
+ */
+.macro PREFETCH_INST count
+ lhu rINST, ((\count) * 2)(rPC)
+.endm
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+.macro GET_INST_OPCODE reg
+ and \reg, rINST, 255
+.endm
+
+/*
+ * Begin executing the opcode in _reg.
+ */
+.macro GOTO_OPCODE reg
+ .set noat
+ sll AT, \reg, 7
+ daddu AT, rIBASE, AT
+ jic AT, 0
+ .set at
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * Note, GET_VREG does sign extension to 64 bits while
+ * GET_VREG_U does zero extension to 64 bits.
+ * One is useful for arithmetic while the other is
+ * useful for storing the result value as 64-bit.
+ */
+.macro GET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_U reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwu \reg, 0(AT)
+ .set at
+.endm
+.macro GET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_OBJECT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw \reg, 0(AT)
+ .set at
+.endm
+.macro SET_VREG_FLOAT reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ .set at
+.endm
+
+/*
+ * Get/set the 64-bit value from a Dalvik register.
+ * Avoid unaligned memory accesses.
+ * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
+ * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
+ */
+.macro GET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lw \reg, 0(AT)
+ lw AT, 4(AT)
+ dinsu \reg, AT, 32, 32
+ .set at
+.endm
+.macro GET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ lwc1 \reg, 0(AT)
+ lw AT, 4(AT)
+ mthc1 AT, \reg
+ .set at
+.endm
+.macro SET_VREG_WIDE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rFP, 2
+ sw \reg, 0(AT)
+ drotr32 \reg, \reg, 0
+ sw \reg, 4(AT)
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ .set at
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+ .set noat
+ dlsa AT, \vreg, rREFS, 2
+ sw zero, 0(AT)
+ sw zero, 4(AT)
+ dlsa AT, \vreg, rFP, 2
+ swc1 \reg, 0(AT)
+ mfhc1 \vreg, \reg
+ sw \vreg, 4(AT)
+ .set at
+.endm
+
+/*
+ * On-stack offsets for spilling/unspilling callee-saved registers
+ * and the frame size.
+ */
+#define STACK_OFFSET_RA 0
+#define STACK_OFFSET_GP 8
+#define STACK_OFFSET_S0 16
+#define STACK_OFFSET_S1 24
+#define STACK_OFFSET_S2 32
+#define STACK_OFFSET_S3 40
+#define STACK_OFFSET_S4 48
+#define STACK_OFFSET_S5 56
+#define STACK_OFFSET_S6 64
+#define STACK_SIZE 80 /* needs 16 byte alignment */
+
+/* Constants for float/double_to_int/long conversions */
+#define INT_MIN 0x80000000
+#define INT_MIN_AS_FLOAT 0xCF000000
+#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
+#define LONG_MIN 0x8000000000000000
+#define LONG_MIN_AS_FLOAT 0xDF000000
+#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Interpreter entry point.
+ */
+
+ .set reorder
+
+ .text
+ .global ExecuteMterpImpl
+ .type ExecuteMterpImpl, %function
+ .balign 16
+/*
+ * On entry:
+ * a0 Thread* self
+ * a1 dex_instructions
+ * a2 ShadowFrame
+ * a3 JValue* result_register
+ *
+ */
+ExecuteMterpImpl:
+ .cfi_startproc
+ .cpsetup t9, t8, ExecuteMterpImpl
+
+ .cfi_def_cfa sp, 0
+ daddu sp, sp, -STACK_SIZE
+ .cfi_adjust_cfa_offset STACK_SIZE
+
+ sd t8, STACK_OFFSET_GP(sp)
+ .cfi_rel_offset 28, STACK_OFFSET_GP
+ sd ra, STACK_OFFSET_RA(sp)
+ .cfi_rel_offset 31, STACK_OFFSET_RA
+
+ sd s0, STACK_OFFSET_S0(sp)
+ .cfi_rel_offset 16, STACK_OFFSET_S0
+ sd s1, STACK_OFFSET_S1(sp)
+ .cfi_rel_offset 17, STACK_OFFSET_S1
+ sd s2, STACK_OFFSET_S2(sp)
+ .cfi_rel_offset 18, STACK_OFFSET_S2
+ sd s3, STACK_OFFSET_S3(sp)
+ .cfi_rel_offset 19, STACK_OFFSET_S3
+ sd s4, STACK_OFFSET_S4(sp)
+ .cfi_rel_offset 20, STACK_OFFSET_S4
+ sd s5, STACK_OFFSET_S5(sp)
+ .cfi_rel_offset 21, STACK_OFFSET_S5
+ sd s6, STACK_OFFSET_S6(sp)
+ .cfi_rel_offset 22, STACK_OFFSET_S6
+
+ /* Remember the return register */
+ sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
+
+ /* Remember the dex instruction pointer */
+ sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
+
+ /* set up "named" registers */
+ move rSELF, a0
+ daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
+ lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
+ dlsa rREFS, v0, rFP, 2
+ lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
+ dlsa rPC, v0, a1, 1
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+ EXPORT_PC
+
+ /* Starting ibase */
+ REFRESH_IBASE
+
+ /* Set up for backwards branches & osr profiling */
+ ld a0, OFF_FP_METHOD(rFP)
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rSELF
+ jal MterpSetUpHotnessCountdown
+ move rPROFILE, v0 # Starting hotness countdown to rPROFILE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+
+ /* NOTE: no fallthrough */
+
+%def alt_stub():
+/*
+ * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to MterpCheckBefore is done as a tail call.
+ */
+ .extern MterpCheckBefore
+ REFRESH_IBASE
+ dla ra, artMterpAsmInstructionStart
+ dla t9, MterpCheckBefore
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rPC
+ daddu ra, ra, (${opnum} * 128) # Addr of primary handler.
+ jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ b MterpFallback
+
+%def helpers():
+% pass
+
+%def footer():
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+
+ .extern MterpLogDivideByZeroException
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogDivideByZeroException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogArrayIndexException
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogArrayIndexException
+#endif
+ b MterpCommonFallback
+
+ .extern MterpLogNullObjectException
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogNullObjectException
+#endif
+ b MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
+ beqzc a0, MterpFallback # If not, fall back to reference interpreter.
+ /* intentional fallthrough - handle pending exception. */
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+ .extern MterpHandleException
+ .extern MterpShouldSwitchInterpreters
+MterpException:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpHandleException # (self, shadow_frame)
+ beqzc v0, MterpExceptionReturn # no local catch, back to caller.
+ ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
+ lwu a1, OFF_FP_DEX_PC(rFP)
+ REFRESH_IBASE
+ dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
+ /* Do we need to switch interpreters? */
+ jal MterpShouldSwitchInterpreters
+ bnezc v0, MterpFallback
+ /* resume execution at catch block */
+ EXPORT_PC
+ FETCH_INST
+ GET_INST_OPCODE v0
+ GOTO_OPCODE v0
+ /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * rINST <= signed offset
+ * rPROFILE <= signed hotness countdown (expanded to 64 bits)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranchNoFlags:
+ bgtzc rINST, .L_forward_branch # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+ li v0, JIT_CHECK_OSR
+ beqc rPROFILE, v0, .L_osr_check
+ bltc rPROFILE, v0, .L_resume_backward_branch
+ dsubu rPROFILE, 1
+ beqzc rPROFILE, .L_add_batch # counted down to zero - report
+.L_resume_backward_branch:
+ lw ra, THREAD_FLAGS_OFFSET(rSELF)
+ REFRESH_IBASE
+ daddu a2, rINST, rINST # a2<- byte offset
+ FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
+ and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
+ bnezc ra, .L_suspend_request_pending
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ move a0, rSELF
+ jal MterpSuspendCheck # (self)
+ bnezc v0, MterpFallback
+ REFRESH_IBASE # might have changed during suspend
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+.L_no_count_backwards:
+ li v0, JIT_CHECK_OSR # check for possible OSR re-entry
+ bnec rPROFILE, v0, .L_resume_backward_branch
+.L_osr_check:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ EXPORT_PC
+ jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
+ bnezc v0, MterpOnStackReplacement
+ b .L_resume_backward_branch
+
+.L_forward_branch:
+ li v0, JIT_CHECK_OSR # check for possible OSR re-entry
+ beqc rPROFILE, v0, .L_check_osr_forward
+.L_resume_forward_branch:
+ daddu a2, rINST, rINST # a2<- byte offset
+ FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+.L_check_osr_forward:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST
+ EXPORT_PC
+ jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
+ bnezc v0, MterpOnStackReplacement
+ b .L_resume_forward_branch
+
+.L_add_batch:
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+ ld a0, OFF_FP_METHOD(rFP)
+ move a2, rSELF
+ jal MterpAddHotnessBatch # (method, shadow_frame, self)
+ move rPROFILE, v0 # restore new hotness countdown to rPROFILE
+ b .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ li a2, 2
+ EXPORT_PC
+ jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
+ bnezc v0, MterpOnStackReplacement
+ FETCH_ADVANCE_INST 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rINST # rINST contains offset
+ jal MterpLogOSR
+#endif
+ li v0, 1 # Signal normal return
+ b MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+ .extern MterpLogFallback
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ move a0, rSELF
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ jal MterpLogFallback
+#endif
+MterpCommonFallback:
+ li v0, 0 # signal retry with reference interpreter.
+ b MterpDone
+
+/*
+ * We pushed some registers on the stack in ExecuteMterpImpl, then saved
+ * SP and RA. Here we restore SP, restore the registers, and then restore
+ * RA to PC.
+ *
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ li v0, 1 # signal return to caller.
+ b MterpDone
+/*
+ * Returned value is expected in a0 and if it's not 64-bit, the 32 most
+ * significant bits of a0 must be zero-extended or sign-extended
+ * depending on the return type.
+ */
+MterpReturn:
+ ld a2, OFF_FP_RESULT_REGISTER(rFP)
+ sd a0, 0(a2)
+ li v0, 1 # signal return to caller.
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
+
+MterpProfileActive:
+ move rINST, v0 # stash return value
+ /* Report cached hotness counts */
+ ld a0, OFF_FP_METHOD(rFP)
+ daddu a1, rFP, OFF_FP_SHADOWFRAME
+ move a2, rSELF
+ sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
+ jal MterpAddHotnessBatch # (method, shadow_frame, self)
+ move v0, rINST # restore return value
+
+.L_pop_and_return:
+ ld s6, STACK_OFFSET_S6(sp)
+ .cfi_restore 22
+ ld s5, STACK_OFFSET_S5(sp)
+ .cfi_restore 21
+ ld s4, STACK_OFFSET_S4(sp)
+ .cfi_restore 20
+ ld s3, STACK_OFFSET_S3(sp)
+ .cfi_restore 19
+ ld s2, STACK_OFFSET_S2(sp)
+ .cfi_restore 18
+ ld s1, STACK_OFFSET_S1(sp)
+ .cfi_restore 17
+ ld s0, STACK_OFFSET_S0(sp)
+ .cfi_restore 16
+
+ ld ra, STACK_OFFSET_RA(sp)
+ .cfi_restore 31
+
+ ld t8, STACK_OFFSET_GP(sp)
+ .cpreturn
+ .cfi_restore 28
+
+ .set noreorder
+ jr ra
+ daddu sp, sp, STACK_SIZE
+ .cfi_adjust_cfa_offset -STACK_SIZE
+
+ .cfi_endproc
+ .set reorder
+ .size ExecuteMterpImpl, .-ExecuteMterpImpl
+
+%def instruction_end():
+
+ .global artMterpAsmInstructionEnd
+artMterpAsmInstructionEnd:
+
+%def instruction_end_alt():
+
+ .global artMterpAsmAltInstructionEnd
+artMterpAsmAltInstructionEnd:
+
+%def instruction_start():
+
+ .global artMterpAsmInstructionStart
+artMterpAsmInstructionStart = .L_op_nop
+ .text
+
+%def instruction_start_alt():
+
+ .global artMterpAsmAltInstructionStart
+artMterpAsmAltInstructionStart = .L_ALT_op_nop
+ .text
+
+%def opcode_start():
+% pass
+%def opcode_end():
+% pass
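
main.S keeps the threaded dispatch of the files it replaces: GET_INST_OPCODE
masks the low 8 bits of rINST and GOTO_OPCODE jumps to rIBASE plus opcode
times 128, so every handler occupies a fixed 128-byte slot of the handler
table (the alt_stub above relies on the same stride when it computes the
primary handler address). A compact C analogue of that computed-goto step,
with invented names rather than generated code:

    #include <stdint.h>

    /* Handler address = table base + opcode * slot size, exactly what
     * "sll AT, reg, 7; daddu AT, rIBASE, AT; jic AT, 0" computes. */
    #define HANDLER_SLOT_SIZE 128

    static uintptr_t opcode_handler(uintptr_t rIBASE, uint16_t rINST) {
        unsigned opcode = rINST & 0xff;              /* GET_INST_OPCODE */
        return rIBASE + opcode * HANDLER_SLOT_SIZE;  /* GOTO_OPCODE     */
    }
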
diff --git a/runtime/interpreter/mterp/mips64/object.S b/runtime/interpreter/mterp/mips64/object.S
new file mode 100644
index 0000000..a5a2b3d
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/object.S
@@ -0,0 +1,262 @@
+%def field(helper=""):
+TODO
+
+%def op_check_cast():
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class//BBBB */
+ .extern MterpCheckCast
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpCheckCast # (index, &obj, method, self)
+ PREFETCH_INST 2
+ bnez v0, MterpPossibleException
+ ADVANCE 2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+% field(helper=helper)
+
+%def op_iget_boolean():
+% op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="lbu")
+
+%def op_iget_byte():
+% op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="lb")
+
+%def op_iget_char():
+% op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="lhu")
+
+%def op_iget_object():
+% op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset//CCCC */
+ .extern artIGetObjectFromMterp
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ EXPORT_PC
+ GET_VREG_U a0, a2 # a0 <- object we're operating on
+ jal artIGetObjectFromMterp # (obj, offset)
+ ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a3, MterpPossibleException # bail out
+ SET_VREG_OBJECT v0, a2 # fp[A] <- v0
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iget_quick(load="lw"):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a4, rINST, 8, 4 # a4 <- A
+ daddu a1, a1, a3
+ beqz a3, common_errNullObject # object was null
+ $load a0, 0(a1) # a0 <- obj.field
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG a0, a4 # fp[A] <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iget_short():
+% op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="lh")
+
+%def op_iget_wide():
+% op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+ /* iget-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a4, 2(rPC) # a4 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- object we're operating on
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ daddu a4, a3, a4 # create direct pointer
+ lw a0, 0(a4)
+ lw a1, 4(a4)
+ dinsu a0, a1, 32, 32
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ SET_VREG_WIDE a0, a2
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_instance_of():
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class//CCCC */
+ .extern MterpInstanceOf
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- CCCC
+ srl a1, rINST, 12 # a1 <- B
+ dlsa a1, a1, rFP, 2 # a1 <- &object
+ ld a2, OFF_FP_METHOD(rFP) # a2 <- method
+ move a3, rSELF # a3 <- self
+ jal MterpInstanceOf # (index, &obj, method, self)
+ ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
+ ext a2, rINST, 8, 4 # a2 <- A
+ PREFETCH_INST 2
+ bnez a1, MterpException
+ ADVANCE 2 # advance rPC
+ SET_VREG v0, a2 # vA <- v0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(store="sb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(store="sb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(store="sh")
+
+%def op_iput_object():
+% op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ .extern MterpIputObjectQuick
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rPC
+ move a2, rINST
+ jal MterpIputObjectQuick
+ beqzc v0, MterpException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iput_quick(store="sw"):
+ /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+ /* op vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a1, 2(rPC) # a1 <- field byte offset
+ GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
+ ext a2, rINST, 8, 4 # a2 <- A
+ beqz a3, common_errNullObject # object was null
+ GET_VREG a0, a2 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a1, a3
+ $store a0, 0(a1) # obj.field <- a0
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(store="sh")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset//CCCC */
+ srl a2, rINST, 12 # a2 <- B
+ lhu a3, 2(rPC) # a3 <- field byte offset
+ GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
+ ext a0, rINST, 8, 4 # a0 <- A
+ beqz a2, common_errNullObject # object was null
+ GET_VREG_WIDE a0, a0 # a0 <- fp[A]
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ daddu a1, a2, a3 # create a direct pointer
+ sw a0, 0(a1)
+ dsrl32 a0, a0, 0
+ sw a0, 4(a1)
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_new_instance():
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class//BBBB */
+ .extern MterpNewInstance
+ EXPORT_PC
+ daddu a0, rFP, OFF_FP_SHADOWFRAME
+ move a1, rSELF
+ move a2, rINST
+ jal MterpNewInstance # (shadow_frame, self, inst_data)
+ beqzc v0, MterpPossibleException
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
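
Note on the new object.S above: it consolidates the mips64 object opcodes that were previously split into one file per opcode (those files are deleted below). Parameterization moves from the old include style, e.g. %include "mips64/op_iget_quick.S" { "load":"lbu" }, to Python-style %def templates with keyword defaults, e.g. %def op_iget_quick(load="lw"): instantiated as % op_iget_quick(load="lbu"). As a rough sketch of how such keyword substitution can be expanded (a toy illustration only, not the actual mterp generator, which is not part of this hunk; expand() and OP_IGET_QUICK are hypothetical names):

import string

def expand(template, **kwargs):
    # Substitute $load, $store, $helper, ... into an opcode template body.
    return string.Template(template).substitute(**kwargs)

# Abridged body of op_iget_quick; $load is the only parameter it takes.
OP_IGET_QUICK = """\
    srl     a2, rINST, 12               # a2 <- B
    lhu     a1, 2(rPC)                  # a1 <- field byte offset
    $load   a0, 0(a1)                   # a0 <- obj.field
"""

# op_iget_boolean_quick is then just op_iget_quick(load="lbu"):
print(expand(OP_IGET_QUICK, load="lbu"))
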
diff --git a/runtime/interpreter/mterp/mips64/op_add_double.S b/runtime/interpreter/mterp/mips64/op_add_double.S
deleted file mode 100644
index 1520e32..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
deleted file mode 100644
index c14382e..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"add.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float.S b/runtime/interpreter/mterp/mips64/op_add_float.S
deleted file mode 100644
index c6ed558..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
deleted file mode 100644
index 4c20547..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"add.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int.S b/runtime/interpreter/mterp/mips64/op_add_int.S
deleted file mode 100644
index 6e569de..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
deleted file mode 100644
index 2a84124..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
deleted file mode 100644
index 94b053b..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
deleted file mode 100644
index 3b6d734..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long.S b/runtime/interpreter/mterp/mips64/op_add_long.S
deleted file mode 100644
index c8d702f..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
deleted file mode 100644
index 928ff54..0000000
--- a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"daddu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aget.S b/runtime/interpreter/mterp/mips64/op_aget.S
deleted file mode 100644
index 0472a06..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if $shift
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- $load a2, $data_offset(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
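
A note on the deleted op_aget handler above (presumably folded into the consolidated templates added elsewhere in this change): the element address is computed as base + (index << shift) + data_offset, with dlsa doing the shift-and-add (a shift count of 0 falls back to daddu because dlsa cannot encode it), and a single unsigned compare serves as the bounds check, since a negative index reinterpreted as unsigned is always >= length. A toy Python model of that arithmetic, using placeholder values rather than the real MIRROR_* offsets:

def aget_address(array_base, length, index, shift, data_offset):
    # bgeu: one unsigned compare covers both index >= length and index < 0
    # (a negative index becomes a huge unsigned value).
    if (index % (1 << 64)) >= length:
        raise IndexError("common_errArrayIndex")
    # dlsa a0, a1, a0, shift  ==  base + (index << shift); daddu when shift == 0.
    return array_base + (index << shift) + data_offset

# e.g. an int array (shift 2) with a hypothetical data offset of 16:
print(hex(aget_address(array_base=0x1000, length=10, index=3, shift=2, data_offset=16)))
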
diff --git a/runtime/interpreter/mterp/mips64/op_aget_boolean.S b/runtime/interpreter/mterp/mips64/op_aget_boolean.S
deleted file mode 100644
index d5be01b..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_byte.S b/runtime/interpreter/mterp/mips64/op_aget_byte.S
deleted file mode 100644
index 084de8d..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_char.S b/runtime/interpreter/mterp/mips64/op_aget_char.S
deleted file mode 100644
index 6c99ed5..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_object.S b/runtime/interpreter/mterp/mips64/op_aget_object.S
deleted file mode 100644
index 6374a05..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_object.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- .extern artAGetObjectFromMterp
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- jal artAGetObjectFromMterp # (array, index)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a4, rINST, 8 # a4 <- AA
- PREFETCH_INST 2
- bnez a1, MterpException
- SET_VREG_OBJECT v0, a4 # vAA <- v0
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aget_short.S b/runtime/interpreter/mterp/mips64/op_aget_short.S
deleted file mode 100644
index 0158b0a..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aget_wide.S b/runtime/interpreter/mterp/mips64/op_aget_wide.S
deleted file mode 100644
index 0945aca..0000000
--- a/runtime/interpreter/mterp/mips64/op_aget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
- dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_and_int.S b/runtime/interpreter/mterp/mips64/op_and_int.S
deleted file mode 100644
index f0792a8..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
deleted file mode 100644
index 08dc615..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
deleted file mode 100644
index 65d28ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
deleted file mode 100644
index ab84bb7..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long.S b/runtime/interpreter/mterp/mips64/op_and_long.S
deleted file mode 100644
index e383ba0..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
deleted file mode 100644
index f863bb9..0000000
--- a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_aput.S b/runtime/interpreter/mterp/mips64/op_aput.S
deleted file mode 100644
index 9bfda97..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if $shift
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- $store a2, $data_offset(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_boolean.S b/runtime/interpreter/mterp/mips64/op_aput_boolean.S
deleted file mode 100644
index 6707a1f..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_byte.S b/runtime/interpreter/mterp/mips64/op_aput_byte.S
deleted file mode 100644
index 7b9ce48..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_char.S b/runtime/interpreter/mterp/mips64/op_aput_char.S
deleted file mode 100644
index 82bc8f7..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_object.S b/runtime/interpreter/mterp/mips64/op_aput_object.S
deleted file mode 100644
index b132456..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_object.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- .extern MterpAputObject
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpAputObject
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_aput_short.S b/runtime/interpreter/mterp/mips64/op_aput_short.S
deleted file mode 100644
index a7af294..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/mips64/op_aput_wide.S b/runtime/interpreter/mterp/mips64/op_aput_wide.S
deleted file mode 100644
index a1d7a3b..0000000
--- a/runtime/interpreter/mterp/mips64/op_aput_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- GET_VREG_WIDE a2, a4 # a2 <- vAA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- dsrl32 a2, a2, 0
- sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_array_length.S b/runtime/interpreter/mterp/mips64/op_array_length.S
deleted file mode 100644
index 2d9e172..0000000
--- a/runtime/interpreter/mterp/mips64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /*
- * Return the length of an array.
- */
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a0, a1 # a0 <- vB (object ref)
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a3, a2 # vB <- length
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_check_cast.S b/runtime/interpreter/mterp/mips64/op_check_cast.S
deleted file mode 100644
index 472595d..0000000
--- a/runtime/interpreter/mterp/mips64/op_check_cast.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- .extern MterpCheckCast
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpCheckCast # (index, &obj, method, self)
- PREFETCH_INST 2
- bnez v0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_cmp_long.S b/runtime/interpreter/mterp/mips64/op_cmp_long.S
deleted file mode 100644
index 6e9376c..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmp_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* cmp-long vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- slt a2, a0, a1
- slt a0, a1, a0
- subu a0, a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
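
A note on the deleted cmp-long handler above: the slt/slt/subu triple is the usual branch-free three-way compare, producing (vBB > vCC) - (vBB < vCC), i.e. -1, 0, or 1. A one-line Python equivalent (the function name is mine, purely illustrative):

def cmp_long(vBB, vCC):
    # slt a2, a0, a1   ->  lt = (vBB < vCC)
    # slt a0, a1, a0   ->  gt = (vCC < vBB)
    # subu a0, a0, a2  ->  gt - lt
    return (vBB > vCC) - (vBB < vCC)   # -1, 0, or 1

print(cmp_long(-5, 7), cmp_long(7, 7), cmp_long(9, 7))   # -1 0 1
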
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_double.S b/runtime/interpreter/mterp/mips64/op_cmpg_double.S
deleted file mode 100644
index a8e2ef9..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmpWide.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_float.S b/runtime/interpreter/mterp/mips64/op_cmpg_float.S
deleted file mode 100644
index 0c93eac..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmp.S" {"gt_bias":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_double.S b/runtime/interpreter/mterp/mips64/op_cmpl_double.S
deleted file mode 100644
index 9111b06..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmpWide.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_float.S b/runtime/interpreter/mterp/mips64/op_cmpl_float.S
deleted file mode 100644
index b047451..0000000
--- a/runtime/interpreter/mterp/mips64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fcmp.S" {"gt_bias":"0"}
diff --git a/runtime/interpreter/mterp/mips64/op_const.S b/runtime/interpreter/mterp/mips64/op_const.S
deleted file mode 100644
index 4b0d69b..0000000
--- a/runtime/interpreter/mterp/mips64/op_const.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_16.S b/runtime/interpreter/mterp/mips64/op_const_16.S
deleted file mode 100644
index 51e68a7..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_4.S b/runtime/interpreter/mterp/mips64/op_const_4.S
deleted file mode 100644
index 0a58bff..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_4.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const/4 vA, #+B */
- ext a2, rINST, 8, 4 # a2 <- A
- seh a0, rINST # sign extend B in rINST
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- sra a0, a0, 12 # shift B into its final position
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- +B
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S
deleted file mode 100644
index 3f0c716..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_high16.S b/runtime/interpreter/mterp/mips64/op_const_high16.S
deleted file mode 100644
index 43effb6..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const/high16 vAA, #+BBBB0000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- sll a0, a0, 16 # a0 <- BBBB0000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB0000
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_handle.S b/runtime/interpreter/mterp/mips64/op_const_method_handle.S
deleted file mode 100644
index 43584d1..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_method_type.S b/runtime/interpreter/mterp/mips64/op_const_method_type.S
deleted file mode 100644
index 553b284..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S
deleted file mode 100644
index 96cbb5a..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
deleted file mode 100644
index 47f2101..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* const/string vAA, String//BBBBBBBB */
- .extern MterpConstString
- EXPORT_PC
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a4, 4(rPC) # a4 <- BBBB (high)
- srl a1, rINST, 8 # a1 <- AA
- ins a0, a4, 16, 16 # a0 <- BBBBbbbb
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 3 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide.S b/runtime/interpreter/mterp/mips64/op_const_wide.S
deleted file mode 100644
index f7eaf7c..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- srl a4, rINST, 8 # a4 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (low middle)
- lh a2, 6(rPC) # a2 <- hhhh (high middle)
- lh a3, 8(rPC) # a3 <- HHHH (high)
- FETCH_ADVANCE_INST 5 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- ins a2, a3, 16, 16 # a2 = HHHHhhhh
- dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_16.S b/runtime/interpreter/mterp/mips64/op_const_wide_16.S
deleted file mode 100644
index 3a70937..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const-wide/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_32.S b/runtime/interpreter/mterp/mips64/op_const_wide_32.S
deleted file mode 100644
index 867197c..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_32.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* const-wide/32 vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
deleted file mode 100644
index d741631..0000000
--- a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- dsll32 a0, a0, 16 # a0 <- BBBB000000000000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_div_double.S b/runtime/interpreter/mterp/mips64/op_div_double.S
deleted file mode 100644
index 44998f0..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
deleted file mode 100644
index 396af79..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"div.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float.S b/runtime/interpreter/mterp/mips64/op_div_float.S
deleted file mode 100644
index 7b09d52..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
deleted file mode 100644
index e74fdda..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"div.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int.S b/runtime/interpreter/mterp/mips64/op_div_int.S
deleted file mode 100644
index fb04acb..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
deleted file mode 100644
index db29b84..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
deleted file mode 100644
index e903dde..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
deleted file mode 100644
index 0559605..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long.S b/runtime/interpreter/mterp/mips64/op_div_long.S
deleted file mode 100644
index 01fc2b2..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
deleted file mode 100644
index 9627ab8..0000000
--- a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_float.S b/runtime/interpreter/mterp/mips64/op_double_to_float.S
deleted file mode 100644
index 2b2acee..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- cvt.s.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S
deleted file mode 100644
index d099522..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_int.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- trunc.w.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S
deleted file mode 100644
index 9b65da5..0000000
--- a/runtime/interpreter/mterp/mips64/op_double_to_long.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- trunc.l.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_fill_array_data.S b/runtime/interpreter/mterp/mips64/op_fill_array_data.S
deleted file mode 100644
index c90f0b9..0000000
--- a/runtime/interpreter/mterp/mips64/op_fill_array_data.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- .extern MterpFillArrayData
- EXPORT_PC
- lh a1, 2(rPC) # a1 <- bbbb (lo)
- lh a0, 4(rPC) # a0 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a1, a0, 16, 16 # a1 <- BBBBbbbb
- GET_VREG_U a0, a3 # a0 <- vAA (array object)
- dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
- jal MterpFillArrayData # (obj, payload)
- beqzc v0, MterpPossibleException # exception?
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array.S b/runtime/interpreter/mterp/mips64/op_filled_new_array.S
deleted file mode 100644
index 35f55c2..0000000
--- a/runtime/interpreter/mterp/mips64/op_filled_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern $helper
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rSELF
- jal $helper
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
deleted file mode 100644
index a4e18f6..0000000
--- a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_double.S b/runtime/interpreter/mterp/mips64/op_float_to_double.S
deleted file mode 100644
index 6accfee..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- cvt.d.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S
deleted file mode 100644
index 2806973..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_int.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- trunc.w.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S
deleted file mode 100644
index c40c8a6..0000000
--- a/runtime/interpreter/mterp/mips64/op_float_to_long.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- trunc.l.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_goto.S b/runtime/interpreter/mterp/mips64/op_goto.S
deleted file mode 100644
index 68fc83d..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- srl rINST, rINST, 8
- seb rINST, rINST # rINST <- offset (sign-extended AA)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_goto_16.S b/runtime/interpreter/mterp/mips64/op_goto_16.S
deleted file mode 100644
index ae56066..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_goto_32.S b/runtime/interpreter/mterp/mips64/op_goto_32.S
deleted file mode 100644
index 498b6d6..0000000
--- a/runtime/interpreter/mterp/mips64/op_goto_32.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- lh rINST, 2(rPC) # rINST <- aaaa (low)
- lh a1, 4(rPC) # a1 <- AAAA (high)
- ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa)
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_if_eq.S b/runtime/interpreter/mterp/mips64/op_if_eq.S
deleted file mode 100644
index aa35cad..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_eqz.S b/runtime/interpreter/mterp/mips64/op_if_eqz.S
deleted file mode 100644
index 0fe3418..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"eq" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ge.S b/runtime/interpreter/mterp/mips64/op_if_ge.S
deleted file mode 100644
index 59fdcc5..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gez.S b/runtime/interpreter/mterp/mips64/op_if_gez.S
deleted file mode 100644
index 57f1f66..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"ge" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gt.S b/runtime/interpreter/mterp/mips64/op_if_gt.S
deleted file mode 100644
index 26cc119..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_gtz.S b/runtime/interpreter/mterp/mips64/op_if_gtz.S
deleted file mode 100644
index 69fcacb..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"gt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_le.S b/runtime/interpreter/mterp/mips64/op_if_le.S
deleted file mode 100644
index a7fce17..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lez.S b/runtime/interpreter/mterp/mips64/op_if_lez.S
deleted file mode 100644
index f3edcc6..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"le" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_lt.S b/runtime/interpreter/mterp/mips64/op_if_lt.S
deleted file mode 100644
index a975a31..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ltz.S b/runtime/interpreter/mterp/mips64/op_if_ltz.S
deleted file mode 100644
index c1d730d..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"lt" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_ne.S b/runtime/interpreter/mterp/mips64/op_if_ne.S
deleted file mode 100644
index f143ee9..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/bincmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_if_nez.S b/runtime/interpreter/mterp/mips64/op_if_nez.S
deleted file mode 100644
index 1856b96..0000000
--- a/runtime/interpreter/mterp/mips64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/zcmp.S" { "condition":"ne" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S
deleted file mode 100644
index a8ce94c..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern $helper
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal $helper
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if $is_object
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S
deleted file mode 100644
index dc2a42a..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
deleted file mode 100644
index 979dc70..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lbu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S
deleted file mode 100644
index c5bf650..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
deleted file mode 100644
index cb35556..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S
deleted file mode 100644
index 3bf0c5a..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
deleted file mode 100644
index 6034567..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lhu" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S
deleted file mode 100644
index 23fa187..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
deleted file mode 100644
index 171d543..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S
+++ /dev/null
@@ -1,16 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- .extern artIGetObjectFromMterp
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- object we're operating on
- jal artIGetObjectFromMterp # (obj, offset)
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_quick.S b/runtime/interpreter/mterp/mips64/op_iget_quick.S
deleted file mode 100644
index fee6ab7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "load":"lw" }
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- $load a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S
deleted file mode 100644
index a9927fc..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
deleted file mode 100644
index 6e152db..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iget_quick.S" { "load":"lh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S
deleted file mode 100644
index 08bf544..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- .extern MterpIGetU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetU64
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- SET_VREG_WIDE v0, a2 # fp[A] <- v0
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
deleted file mode 100644
index 2adc6ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* iget-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a4, 2(rPC) # a4 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- daddu a4, a3, a4 # create direct pointer
- lw a0, 0(a4)
- lw a1, 4(a4)
- dinsu a0, a1, 32, 32
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG_WIDE a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_instance_of.S b/runtime/interpreter/mterp/mips64/op_instance_of.S
deleted file mode 100644
index 39a5dc7..0000000
--- a/runtime/interpreter/mterp/mips64/op_instance_of.S
+++ /dev/null
@@ -1,23 +0,0 @@
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- .extern MterpInstanceOf
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- CCCC
- srl a1, rINST, 12 # a1 <- B
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpInstanceOf # (index, &obj, method, self)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a1, MterpException
- ADVANCE 2 # advance rPC
- SET_VREG v0, a2 # vA <- v0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_byte.S b/runtime/interpreter/mterp/mips64/op_int_to_byte.S
deleted file mode 100644
index 1993e07..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"seb a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_char.S b/runtime/interpreter/mterp/mips64/op_int_to_char.S
deleted file mode 100644
index 8f03acd..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"and a0, a0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_double.S b/runtime/interpreter/mterp/mips64/op_int_to_double.S
deleted file mode 100644
index 6df71be..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- cvt.d.w f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_float.S b/runtime/interpreter/mterp/mips64/op_int_to_float.S
deleted file mode 100644
index 77e9eba..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- cvt.s.w f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_long.S b/runtime/interpreter/mterp/mips64/op_int_to_long.S
deleted file mode 100644
index 7b9ad86..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* int-to-long vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_int_to_short.S b/runtime/interpreter/mterp/mips64/op_int_to_short.S
deleted file mode 100644
index 4a3f234..0000000
--- a/runtime/interpreter/mterp/mips64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"seh a0, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom.S b/runtime/interpreter/mterp/mips64/op_invoke_custom.S
deleted file mode 100644
index 964253d..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S b/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
deleted file mode 100644
index e6585e3..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct.S b/runtime/interpreter/mterp/mips64/op_invoke_direct.S
deleted file mode 100644
index 5047118..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
deleted file mode 100644
index 5c9b95f..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface.S b/runtime/interpreter/mterp/mips64/op_invoke_interface.S
deleted file mode 100644
index ed148ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeInterface" }
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
deleted file mode 100644
index 91c231e..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
deleted file mode 100644
index d9324d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 8e0ecb5..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static.S b/runtime/interpreter/mterp/mips64/op_invoke_static.S
deleted file mode 100644
index 44f5cb7..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_static.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeStatic" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
deleted file mode 100644
index 289e5aa..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super.S b/runtime/interpreter/mterp/mips64/op_invoke_super.S
deleted file mode 100644
index b13fffe..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeSuper" }
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
deleted file mode 100644
index 350b975..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
deleted file mode 100644
index 0d26cda..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtual" }
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
deleted file mode 100644
index f39562c..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
deleted file mode 100644
index 0bb43f8..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index c448851..0000000
--- a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S
deleted file mode 100644
index 9a789e6..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "helper":"MterpIPutU32" }
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern $helper
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal $helper
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S
deleted file mode 100644
index 8e1d083..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
deleted file mode 100644
index df99948..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S
deleted file mode 100644
index ce3b614..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
deleted file mode 100644
index df99948..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sb" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S
deleted file mode 100644
index 1d587fa..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
deleted file mode 100644
index a6286b7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S
deleted file mode 100644
index dd1938e..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_object.S
+++ /dev/null
@@ -1,11 +0,0 @@
- .extern MterpIPutObj
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpIPutObj
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
deleted file mode 100644
index 658ef42..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S
+++ /dev/null
@@ -1,10 +0,0 @@
- .extern MterpIputObjectQuick
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpIputObjectQuick
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_quick.S b/runtime/interpreter/mterp/mips64/op_iput_quick.S
deleted file mode 100644
index b95adfc..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "store":"sw" }
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- $store a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S
deleted file mode 100644
index dd68bbe..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
deleted file mode 100644
index a6286b7..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_iput_quick.S" { "store":"sh" }
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S
deleted file mode 100644
index 6272690..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* iput-wide vA, vB, field//CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutU64
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
deleted file mode 100644
index 95a8ad8..0000000
--- a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* iput-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a3, 2(rPC) # a3 <- field byte offset
- GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
- ext a0, rINST, 8, 4 # a0 <- A
- beqz a2, common_errNullObject # object was null
- GET_VREG_WIDE a0, a0 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a2, a3 # create a direct pointer
- sw a0, 0(a1)
- dsrl32 a0, a0, 0
- sw a0, 4(a1)
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_double.S b/runtime/interpreter/mterp/mips64/op_long_to_double.S
deleted file mode 100644
index 8503e76..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_double.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- cvt.d.l f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_float.S b/runtime/interpreter/mterp/mips64/op_long_to_float.S
deleted file mode 100644
index 31f5c0e..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_float.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- cvt.s.l f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_long_to_int.S b/runtime/interpreter/mterp/mips64/op_long_to_int.S
deleted file mode 100644
index 4ef4b51..0000000
--- a/runtime/interpreter/mterp/mips64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "mips64/op_move.S"
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_enter.S b/runtime/interpreter/mterp/mips64/op_monitor_enter.S
deleted file mode 100644
index 36ae503..0000000
--- a/runtime/interpreter/mterp/mips64/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- .extern artLockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artLockObjectFromCode
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_monitor_exit.S b/runtime/interpreter/mterp/mips64/op_monitor_exit.S
deleted file mode 100644
index 9945952..0000000
--- a/runtime/interpreter/mterp/mips64/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- .extern artUnlockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move.S b/runtime/interpreter/mterp/mips64/op_move.S
deleted file mode 100644
index c79f6cd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_16.S b/runtime/interpreter/mterp/mips64/op_move_16.S
deleted file mode 100644
index 9d5c4dc..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAAAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_exception.S b/runtime/interpreter/mterp/mips64/op_move_exception.S
deleted file mode 100644
index d226718..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_exception.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* move-exception vAA */
- srl a2, rINST, 8 # a2 <- AA
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- SET_VREG_OBJECT a0, a2 # vAA <- exception obj
- GET_INST_OPCODE v0 # extract opcode from rINST
- sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_from16.S b/runtime/interpreter/mterp/mips64/op_move_from16.S
deleted file mode 100644
index 6d6bde0..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_from16.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_object.S b/runtime/interpreter/mterp/mips64/op_move_object.S
deleted file mode 100644
index 47e0272..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_16.S b/runtime/interpreter/mterp/mips64/op_move_object_16.S
deleted file mode 100644
index a777dcd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_object_from16.S b/runtime/interpreter/mterp/mips64/op_move_object_from16.S
deleted file mode 100644
index ab55ebd..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result.S b/runtime/interpreter/mterp/mips64/op_move_result.S
deleted file mode 100644
index 1ec28cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result.S
+++ /dev/null
@@ -1,14 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if $is_object
- SET_VREG_OBJECT a0, a2 # vAA <- result
- .else
- SET_VREG a0, a2 # vAA <- result
- .endif
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_object.S b/runtime/interpreter/mterp/mips64/op_move_result_object.S
deleted file mode 100644
index e76bc22..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_move_result_wide.S b/runtime/interpreter/mterp/mips64/op_move_result_wide.S
deleted file mode 100644
index 3ba0d72..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_result_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* for: move-result-wide */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- ld a0, 0(a0) # a0 <- result.j
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide.S b/runtime/interpreter/mterp/mips64/op_move_wide.S
deleted file mode 100644
index ea23f87..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- ext a3, rINST, 12, 4 # a3 <- B
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG_WIDE a0, a3 # a0 <- vB
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_16.S b/runtime/interpreter/mterp/mips64/op_move_wide_16.S
deleted file mode 100644
index 8ec6068..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide_16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
deleted file mode 100644
index 11d5603..0000000
--- a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double.S b/runtime/interpreter/mterp/mips64/op_mul_double.S
deleted file mode 100644
index e7e17f7..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
deleted file mode 100644
index f404d46..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"mul.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float.S b/runtime/interpreter/mterp/mips64/op_mul_float.S
deleted file mode 100644
index 9a695fc..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
deleted file mode 100644
index a134a34..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"mul.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int.S b/runtime/interpreter/mterp/mips64/op_mul_int.S
deleted file mode 100644
index e1b90ff..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
deleted file mode 100644
index c0c4063..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
deleted file mode 100644
index bb4fff8..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
deleted file mode 100644
index da11ea9..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long.S b/runtime/interpreter/mterp/mips64/op_mul_long.S
deleted file mode 100644
index ec32850..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
deleted file mode 100644
index eb50cda..0000000
--- a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dmul a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_double.S b/runtime/interpreter/mterp/mips64/op_neg_double.S
deleted file mode 100644
index a135d61..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_double.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" }
- neg.d f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_float.S b/runtime/interpreter/mterp/mips64/op_neg_float.S
deleted file mode 100644
index 78019f0..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_float.S
+++ /dev/null
@@ -1,3 +0,0 @@
-%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" }
- neg.s f0, f0
-%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" }
diff --git a/runtime/interpreter/mterp/mips64/op_neg_int.S b/runtime/interpreter/mterp/mips64/op_neg_int.S
deleted file mode 100644
index 31538c0..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"subu a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_neg_long.S b/runtime/interpreter/mterp/mips64/op_neg_long.S
deleted file mode 100644
index bc80d06..0000000
--- a/runtime/interpreter/mterp/mips64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unopWide.S" {"instr":"dsubu a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_new_array.S b/runtime/interpreter/mterp/mips64/op_new_array.S
deleted file mode 100644
index d78b4ac..0000000
--- a/runtime/interpreter/mterp/mips64/op_new_array.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- .extern MterpNewArray
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpNewArray
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_new_instance.S b/runtime/interpreter/mterp/mips64/op_new_instance.S
deleted file mode 100644
index cc5e13e..0000000
--- a/runtime/interpreter/mterp/mips64/op_new_instance.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- .extern MterpNewInstance
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- jal MterpNewInstance # (shadow_frame, self, inst_data)
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_nop.S b/runtime/interpreter/mterp/mips64/op_nop.S
deleted file mode 100644
index cc803a7..0000000
--- a/runtime/interpreter/mterp/mips64/op_nop.S
+++ /dev/null
@@ -1,3 +0,0 @@
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_not_int.S b/runtime/interpreter/mterp/mips64/op_not_int.S
deleted file mode 100644
index 5954095..0000000
--- a/runtime/interpreter/mterp/mips64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unop.S" {"instr":"nor a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_not_long.S b/runtime/interpreter/mterp/mips64/op_not_long.S
deleted file mode 100644
index c8f5da7..0000000
--- a/runtime/interpreter/mterp/mips64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unopWide.S" {"instr":"nor a0, zero, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int.S b/runtime/interpreter/mterp/mips64/op_or_int.S
deleted file mode 100644
index 0102355..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
deleted file mode 100644
index eed8900..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
deleted file mode 100644
index 16a0f3e..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
deleted file mode 100644
index dbbf790..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long.S b/runtime/interpreter/mterp/mips64/op_or_long.S
deleted file mode 100644
index e6f8639..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
deleted file mode 100644
index ad5e6c8..0000000
--- a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S
deleted file mode 100644
index 44e77a4..0000000
--- a/runtime/interpreter/mterp/mips64/op_packed_switch.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBBBBBB */
- .extern $func
- lh a0, 2(rPC) # a0 <- bbbb (lo)
- lh a1, 4(rPC) # a1 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a0, a1, 16, 16 # a0 <- BBBBbbbb
- GET_VREG a1, a3 # a1 <- vAA
- dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
- jal $func # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double.S b/runtime/interpreter/mterp/mips64/op_rem_double.S
deleted file mode 100644
index ba61cfd..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_double.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* rem-double vAA, vBB, vCC */
- .extern fmod
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f12, a2 # f12 <- vBB
- GET_VREG_DOUBLE f13, a3 # f13 <- vCC
- jal fmod # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
deleted file mode 100644
index c649f0d..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* rem-double/2addr vA, vB */
- .extern fmod
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f12, a2 # f12 <- vA
- GET_VREG_DOUBLE f13, a3 # f13 <- vB
- jal fmod # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float.S b/runtime/interpreter/mterp/mips64/op_rem_float.S
deleted file mode 100644
index 3967b0b..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_float.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* rem-float vAA, vBB, vCC */
- .extern fmodf
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f12, a2 # f12 <- vBB
- GET_VREG_FLOAT f13, a3 # f13 <- vCC
- jal fmodf # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
deleted file mode 100644
index 3fed41e..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* rem-float/2addr vA, vB */
- .extern fmodf
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f12, a2 # f12 <- vA
- GET_VREG_FLOAT f13, a3 # f13 <- vB
- jal fmodf # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int.S b/runtime/interpreter/mterp/mips64/op_rem_int.S
deleted file mode 100644
index c05e9c4..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
deleted file mode 100644
index a4e162d..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
deleted file mode 100644
index 3284f14..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
deleted file mode 100644
index 1e6a584..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long.S b/runtime/interpreter/mterp/mips64/op_rem_long.S
deleted file mode 100644
index 32b2d19..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
deleted file mode 100644
index ad658e1..0000000
--- a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"}
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
deleted file mode 100644
index edd795f..0000000
--- a/runtime/interpreter/mterp/mips64/op_return.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"instr":"GET_VREG"}
- /*
- * Return a 32-bit value.
- *
- * for: return (sign-extend), return-object (zero-extend)
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- $instr a0, a2 # a0 <- vAA
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
deleted file mode 100644
index b69b880..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_return.S" {"instr":"GET_VREG_U"}
diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S
deleted file mode 100644
index f6eee91..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
deleted file mode 100644
index 4e9b640..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
- .extern MterpSuspendCheck
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S
deleted file mode 100644
index 91ca1fa..0000000
--- a/runtime/interpreter/mterp/mips64/op_return_wide.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vAA
- b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int.S b/runtime/interpreter/mterp/mips64/op_rsub_int.S
deleted file mode 100644
index fa31a0a..0000000
--- a/runtime/interpreter/mterp/mips64/op_rsub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
deleted file mode 100644
index c31ff32..0000000
--- a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S
deleted file mode 100644
index b7b0382..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32", "extend":"" }
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern $helper
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal $helper
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
- $extend
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if $is_object
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S
deleted file mode 100644
index fe2deb1..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetU8", "extend":"and v0, v0, 0xff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S
deleted file mode 100644
index a7e2bef..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetI8", "extend":"seb v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S
deleted file mode 100644
index ed86f32..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetU16", "extend":"and v0, v0, 0xffff"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S
deleted file mode 100644
index 3b260e6..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S b/runtime/interpreter/mterp/mips64/op_sget_short.S
deleted file mode 100644
index f708a20..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sget.S" {"helper":"MterpSGetI16", "extend":"seh v0, v0"}
diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S
deleted file mode 100644
index 7c31252..0000000
--- a/runtime/interpreter/mterp/mips64/op_sget_wide.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field//BBBB */
- .extern MterpSGetU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetU64
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a4, rINST, 8 # a4 <- AA
- bnez a3, MterpException # bail out
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG_WIDE v0, a4
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int.S b/runtime/interpreter/mterp/mips64/op_shl_int.S
deleted file mode 100644
index 784481f..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
deleted file mode 100644
index a6c8a78..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
deleted file mode 100644
index 36ef207..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"sll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long.S b/runtime/interpreter/mterp/mips64/op_shl_long.S
deleted file mode 100644
index 225a2cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
deleted file mode 100644
index c04d882..0000000
--- a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsll a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int.S b/runtime/interpreter/mterp/mips64/op_shr_int.S
deleted file mode 100644
index eded037..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
deleted file mode 100644
index 5b4d96f..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
deleted file mode 100644
index 175eb86..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"sra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long.S b/runtime/interpreter/mterp/mips64/op_shr_long.S
deleted file mode 100644
index 0db38c8..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
deleted file mode 100644
index 48131ad..0000000
--- a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsra a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sparse_switch.S b/runtime/interpreter/mterp/mips64/op_sparse_switch.S
deleted file mode 100644
index b065aaa..0000000
--- a/runtime/interpreter/mterp/mips64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S
deleted file mode 100644
index 28b8c3e..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"MterpSPutU32" }
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern $helper
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal $helper
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S
deleted file mode 100644
index 2e769d5..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S
deleted file mode 100644
index 0b04b59..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S
deleted file mode 100644
index 4a80375..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S
deleted file mode 100644
index ff43967..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_object.S
+++ /dev/null
@@ -1,11 +0,0 @@
- .extern MterpSPutObj
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpSPutObj
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S
deleted file mode 100644
index c00043b..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S
deleted file mode 100644
index bfb6983..0000000
--- a/runtime/interpreter/mterp/mips64/op_sput_wide.S
+++ /dev/null
@@ -1,18 +0,0 @@
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field//BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a1, rINST, 8 # a2 <- AA
- dlsa a1, a1, rFP, 2
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutU64
- bnezc v0, MterpException # 0 on success, -1 on failure
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double.S b/runtime/interpreter/mterp/mips64/op_sub_double.S
deleted file mode 100644
index 40a6c89..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
deleted file mode 100644
index 984737e..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinopWide2addr.S" {"instr":"sub.d f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float.S b/runtime/interpreter/mterp/mips64/op_sub_float.S
deleted file mode 100644
index 9010592..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
deleted file mode 100644
index e7d4ffe..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/fbinop2addr.S" {"instr":"sub.s f0, f0, f1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int.S b/runtime/interpreter/mterp/mips64/op_sub_int.S
deleted file mode 100644
index 609ea05..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
deleted file mode 100644
index ba2f1e8..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long.S b/runtime/interpreter/mterp/mips64/op_sub_long.S
deleted file mode 100644
index 09a6afd..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
deleted file mode 100644
index b9ec82a..0000000
--- a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsubu a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_throw.S b/runtime/interpreter/mterp/mips64/op_throw.S
deleted file mode 100644
index 6418d57..0000000
--- a/runtime/interpreter/mterp/mips64/op_throw.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (exception object)
- beqzc a0, common_errNullObject
- sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3e.S b/runtime/interpreter/mterp/mips64/op_unused_3e.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_3f.S b/runtime/interpreter/mterp/mips64/op_unused_3f.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_40.S b/runtime/interpreter/mterp/mips64/op_unused_40.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_41.S b/runtime/interpreter/mterp/mips64/op_unused_41.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_42.S b/runtime/interpreter/mterp/mips64/op_unused_42.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_43.S b/runtime/interpreter/mterp/mips64/op_unused_43.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_79.S b/runtime/interpreter/mterp/mips64/op_unused_79.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_7a.S b/runtime/interpreter/mterp/mips64/op_unused_7a.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f3.S b/runtime/interpreter/mterp/mips64/op_unused_f3.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f4.S b/runtime/interpreter/mterp/mips64/op_unused_f4.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f5.S b/runtime/interpreter/mterp/mips64/op_unused_f5.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f6.S b/runtime/interpreter/mterp/mips64/op_unused_f6.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f7.S b/runtime/interpreter/mterp/mips64/op_unused_f7.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f8.S b/runtime/interpreter/mterp/mips64/op_unused_f8.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_f9.S b/runtime/interpreter/mterp/mips64/op_unused_f9.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fc.S b/runtime/interpreter/mterp/mips64/op_unused_fc.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_unused_fd.S b/runtime/interpreter/mterp/mips64/op_unused_fd.S
deleted file mode 100644
index 29463d7..0000000
--- a/runtime/interpreter/mterp/mips64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/unused.S"
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int.S b/runtime/interpreter/mterp/mips64/op_ushr_int.S
deleted file mode 100644
index 37c90cb..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
deleted file mode 100644
index d6bf413..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
deleted file mode 100644
index 2a2d843..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"srl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long.S b/runtime/interpreter/mterp/mips64/op_ushr_long.S
deleted file mode 100644
index e724405..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
deleted file mode 100644
index d2cf135..0000000
--- a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"dsrl a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int.S b/runtime/interpreter/mterp/mips64/op_xor_int.S
deleted file mode 100644
index ee25ebc..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
deleted file mode 100644
index 0f04967..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
deleted file mode 100644
index ecb21ae..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
deleted file mode 100644
index 115ae99..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long.S b/runtime/interpreter/mterp/mips64/op_xor_long.S
deleted file mode 100644
index 7ebabc2..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
deleted file mode 100644
index 0f1919a..0000000
--- a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "mips64/binopWide2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/runtime/interpreter/mterp/mips64/other.S b/runtime/interpreter/mterp/mips64/other.S
new file mode 100644
index 0000000..789efee
--- /dev/null
+++ b/runtime/interpreter/mterp/mips64/other.S
@@ -0,0 +1,355 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern $helper
+ EXPORT_PC
+ lhu a0, 2(rPC) # a0 <- BBBB
+ srl a1, rINST, 8 # a1 <- AA
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal $helper # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 2 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 2 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ b MterpFallback
+
+%def op_const():
+ /* const vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_16():
+ /* const/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_4():
+ /* const/4 vA, #+B */
+ ext a2, rINST, 8, 4 # a2 <- A
+ seh a0, rINST # sign extend B in rINST
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ sra a0, a0, 12 # shift B into its final position
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vA <- +B
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, #+BBBB0000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ sll a0, a0, 16 # a0 <- BBBB0000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG a0, a2 # vAA <- +BBBB0000
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, string@BBBBBBBB */
+ .extern MterpConstString
+ EXPORT_PC
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a4, 4(rPC) # a4 <- BBBB (high)
+ srl a1, rINST, 8 # a1 <- AA
+ ins a0, a4, 16, 16 # a0 <- BBBBbbbb
+ daddu a2, rFP, OFF_FP_SHADOWFRAME
+ move a3, rSELF
+ jal MterpConstString # (index, tgt_reg, shadow_frame, self)
+ PREFETCH_INST 3 # load rINST
+ bnez v0, MterpPossibleException # let reference interpreter deal with it.
+ ADVANCE 3 # advance rPC
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_wide():
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ srl a4, rINST, 8 # a4 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (low middle)
+ lh a2, 6(rPC) # a2 <- hhhh (high middle)
+ lh a3, 8(rPC) # a3 <- HHHH (high)
+ FETCH_ADVANCE_INST 5 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ ins a2, a3, 16, 16 # a2 = HHHHhhhh
+ dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, #+BBBB */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- sign-extended BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- bbbb (low)
+ lh a1, 4(rPC) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ ins a0, a1, 16, 16 # a0 = BBBBbbbb
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ srl a2, rINST, 8 # a2 <- AA
+ lh a0, 2(rPC) # a0 <- BBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ dsll32 a0, a0, 16 # a0 <- BBBB000000000000
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_monitor_enter():
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ .extern artLockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artLockObjectFromCode
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_monitor_exit():
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ .extern artUnlockObjectFromCode
+ EXPORT_PC
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_U a0, a2 # a0 <- vAA (object)
+ move a1, rSELF # a1 <- self
+ jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
+ bnezc v0, MterpException
+ FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ ext a2, rINST, 8, 4 # a2 <- A
+ ext a3, rINST, 12, 4 # a3 <- B
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vA <- vB
+ .else
+ SET_VREG a0, a2 # vA <- vB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAAAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_exception():
+ /* move-exception vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ SET_VREG_OBJECT a0, a2 # vAA <- exception obj
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_VREG a0, a3 # a0 <- vBBBB
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
+ .else
+ SET_VREG a0, a2 # vAA <- vBBBB
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ lw a0, 0(a0) # a0 <- result.i
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ .if $is_object
+ SET_VREG_OBJECT a0, a2 # vAA <- result
+ .else
+ SET_VREG a0, a2 # vAA <- result
+ .endif
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* for: move-result-wide */
+ /* op vAA */
+ srl a2, rINST, 8 # a2 <- AA
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
+ ld a0, 0(a0) # a0 <- result.j
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- result
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ ext a3, rINST, 12, 4 # a3 <- B
+ ext a2, rINST, 8, 4 # a2 <- A
+ GET_VREG_WIDE a0, a3 # a0 <- vB
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vA <- vB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 4(rPC) # a3 <- BBBB
+ lhu a2, 2(rPC) # a2 <- AAAA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ lhu a3, 2(rPC) # a3 <- BBBB
+ srl a2, rINST, 8 # a2 <- AA
+ GET_VREG_WIDE a0, a3 # a0 <- vBBBB
+ FETCH_ADVANCE_INST 2 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ SET_VREG_WIDE a0, a2 # vAA <- vBBBB
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_nop():
+ FETCH_ADVANCE_INST 1 # advance rPC, load rINST
+ GET_INST_OPCODE v0 # extract opcode from rINST
+ GOTO_OPCODE v0 # jump to next instruction
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
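
For reference, the `%def const(helper=...)` template at the top of this new file hands the
constant-pool index to a C++ helper using the calling convention noted in its comments
((index, tgt_reg, shadow_frame, self)) and treats a non-zero return value as "exception
pending" (the `bnez v0, MterpPossibleException` check). The real handlers (MterpConstClass,
MterpConstString, ...) live in mterp.cc and are not shown in this hunk; the following is only
a hedged sketch of that contract, with a hypothetical name and a placeholder body:

    // Hypothetical helper matching the calling convention used by the %def const() template.
    // This is not ART's implementation; it only documents the contract the assembly relies on.
    extern "C" size_t ExampleConstHandler(uint32_t index,         // BBBB operand
                                          uint32_t tgt_reg,       // AA, destination vreg
                                          ShadowFrame* shadow_frame,
                                          Thread* self) {
      // Resolve `index` against the current dex file, write the result into vreg
      // `tgt_reg` of `shadow_frame`, and return non-zero iff an exception is now
      // pending on `self`; the assembly then branches to MterpPossibleException.
      (void)index; (void)tgt_reg; (void)shadow_frame; (void)self;
      return 0u;
    }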
diff --git a/runtime/interpreter/mterp/mips64/unop.S b/runtime/interpreter/mterp/mips64/unop.S
deleted file mode 100644
index e3f7ea0..0000000
--- a/runtime/interpreter/mterp/mips64/unop.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- $preinstr # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- $instr # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unopWide.S b/runtime/interpreter/mterp/mips64/unopWide.S
deleted file mode 100644
index c0dd1aa..0000000
--- a/runtime/interpreter/mterp/mips64/unopWide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"preinstr":""}
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * For: not-long, neg-long
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- $preinstr # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- $instr # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/unused.S b/runtime/interpreter/mterp/mips64/unused.S
deleted file mode 100644
index 30d38bd..0000000
--- a/runtime/interpreter/mterp/mips64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
diff --git a/runtime/interpreter/mterp/mips64/zcmp.S b/runtime/interpreter/mterp/mips64/zcmp.S
deleted file mode 100644
index 75db49e..0000000
--- a/runtime/interpreter/mterp/mips64/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- b${condition}zc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 7b37c9a..be985ff 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -34,11 +34,11 @@
void CheckMterpAsmConstants() {
/*
* If we're using computed goto instruction transitions, make sure
- * none of the handlers overflows the 128-byte limit. This won't tell
+ * none of the handlers overflows the kMterpHandlerSize limit. This won't tell
* which one did, but if any one is too big the total size will
* overflow.
*/
- const int width = 128;
+ const int width = kMterpHandlerSize;
int interp_size = (uintptr_t) artMterpAsmInstructionEnd -
(uintptr_t) artMterpAsmInstructionStart;
if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
@@ -48,11 +48,7 @@
}
void InitMterpTls(Thread* self) {
- self->SetMterpDefaultIBase(artMterpAsmInstructionStart);
- self->SetMterpAltIBase(artMterpAsmAltInstructionStart);
- self->SetMterpCurrentIBase((kTraceExecutionEnabled || kTestExportPC) ?
- artMterpAsmAltInstructionStart :
- artMterpAsmInstructionStart);
+ self->SetMterpCurrentIBase(artMterpAsmInstructionStart);
}
/*
@@ -152,6 +148,11 @@
const instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation();
return instrumentation->NonJitProfilingActive() ||
Dbg::IsDebuggerActive() ||
+ // mterp only knows how to deal with the normal exits. It cannot handle any of the
+ // non-standard force-returns.
+ // TODO: We really only need to switch interpreters if a PopFrame has actually happened. We
+ // should check this here.
+ UNLIKELY(runtime->AreNonStandardExitsEnabled()) ||
// An async exception has been thrown. We need to go to the switch interpreter. MTerp doesn't
// know how to deal with these so we could end up never dealing with it if we are in an
// infinite loop. Since this can be called in a tight loop and getting the current thread
@@ -172,7 +173,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoFastInvoke<kVirtual>(
+ return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -183,7 +184,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kSuper, false, false>(
+ return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -194,7 +195,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kInterface, false, false>(
+ return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -205,7 +206,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoFastInvoke<kDirect>(
+ return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -216,7 +217,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoFastInvoke<kStatic>(
+ return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -249,7 +250,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kVirtual, true, false>(
+ return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -260,7 +261,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kSuper, true, false>(
+ return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -271,7 +272,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kInterface, true, false>(
+ return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -282,7 +283,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kDirect, true, false>(
+ return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -293,7 +294,7 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kStatic, true, false>(
+ return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -490,24 +491,6 @@
return true;
}
-extern "C" size_t MterpSPutObj(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
- uint32_t inst_data, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
- (self, *shadow_frame, inst, inst_data);
-}
-
-extern "C" size_t MterpIPutObj(ShadowFrame* shadow_frame,
- uint16_t* dex_pc_ptr,
- uint32_t inst_data,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
- (self, *shadow_frame, inst, inst_data);
-}
-
extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
@@ -681,352 +664,195 @@
return MterpShouldSwitchInterpreters();
}
-template<typename PrimType, typename RetType, typename Getter, FindFieldType kType>
-NO_INLINE RetType artGetInstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj)); // GC might move the object.
- ArtField* field = FindFieldFromCode<kType, /* access_checks */ false>(
- field_idx, referrer, self, sizeof(PrimType));
- if (UNLIKELY(field == nullptr)) {
- return 0; // Will throw exception by checking with Thread::Current.
+// Execute a single field access instruction (get/put, static/instance).
+// The template arguments reduce this to a fairly small amount of code.
+// It requires the target object and field to be already resolved.
+template<typename PrimType, FindFieldType kAccessType>
+ALWAYS_INLINE void MterpFieldAccess(Instruction* inst,
+ uint16_t inst_data,
+ ShadowFrame* shadow_frame,
+ ObjPtr<mirror::Object> obj,
+ MemberOffset offset,
+ bool is_volatile)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(std::is_integral<PrimType>::value, "Unexpected primitive type");
+ constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
+ constexpr bool kIsPrimitive = (kAccessType & FindFieldFlags::PrimitiveBit) != 0;
+ constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
+
+ uint16_t vRegA = kIsStatic ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+ if (kIsPrimitive) {
+ if (kIsRead) {
+ PrimType value = UNLIKELY(is_volatile)
+ ? obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset)
+ : obj->GetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset);
+ if (sizeof(PrimType) == sizeof(uint64_t)) {
+ shadow_frame->SetVRegLong(vRegA, value); // Set two consecutive registers.
+ } else {
+ shadow_frame->SetVReg(vRegA, static_cast<int32_t>(value)); // Sign/zero extend.
+ }
+ } else { // Write.
+ uint64_t value = (sizeof(PrimType) == sizeof(uint64_t))
+ ? shadow_frame->GetVRegLong(vRegA)
+ : shadow_frame->GetVReg(vRegA);
+ if (UNLIKELY(is_volatile)) {
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ true>(offset, value);
+ } else {
+ obj->SetFieldPrimitive<PrimType, /*kIsVolatile*/ false>(offset, value);
+ }
+ }
+ } else { // Object.
+ if (kIsRead) {
+ ObjPtr<mirror::Object> value = UNLIKELY(is_volatile)
+ ? obj->GetFieldObjectVolatile<mirror::Object>(offset)
+ : obj->GetFieldObject<mirror::Object>(offset);
+ shadow_frame->SetVRegReference(vRegA, value);
+ } else { // Write.
+ ObjPtr<mirror::Object> value = shadow_frame->GetVRegReference(vRegA);
+ if (UNLIKELY(is_volatile)) {
+ obj->SetFieldObjectVolatile</*kTransactionActive*/ false>(offset, value);
+ } else {
+ obj->SetFieldObject</*kTransactionActive*/ false>(offset, value);
+ }
+ }
}
- if (UNLIKELY(h == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(field, /*is_read*/ true);
- return 0; // Will throw exception by checking with Thread::Current.
- }
- return Getter::Get(obj, field);
}
-template<typename PrimType, typename RetType, typename Getter>
-ALWAYS_INLINE RetType artGetInstanceFromMterpFast(uint32_t field_idx,
- mirror::Object* obj,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- constexpr bool kIsObject = std::is_same<RetType, mirror::Object*>::value;
- constexpr FindFieldType kType = kIsObject ? InstanceObjectRead : InstancePrimitiveRead;
+template<typename PrimType, FindFieldType kAccessType>
+NO_INLINE bool MterpFieldAccessSlow(Instruction* inst,
+ uint16_t inst_data,
+ ShadowFrame* shadow_frame,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
+ constexpr bool kIsRead = (kAccessType & FindFieldFlags::ReadBit) != 0;
+
+ // Update the dex pc in shadow frame, just in case anything throws.
+ shadow_frame->SetDexPCPtr(reinterpret_cast<uint16_t*>(inst));
+ ArtMethod* referrer = shadow_frame->GetMethod();
+ uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ field_idx, referrer, self, sizeof(PrimType));
+ if (UNLIKELY(field == nullptr)) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+ ObjPtr<mirror::Object> obj = kIsStatic
+ ? field->GetDeclaringClass().Ptr()
+ : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
+ if (UNLIKELY(obj == nullptr)) {
+ ThrowNullPointerExceptionForFieldAccess(field, kIsRead);
+ return false;
+ }
+ MterpFieldAccess<PrimType, kAccessType>(
+ inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
+ return true;
+}
+
+// This method is called from assembly to handle field access instructions.
+//
+// This method is fairly hot. It is long, but it has been carefully optimized.
+// It contains only fully inlined methods -> no spills -> no prologue/epilogue.
+template<typename PrimType, FindFieldType kAccessType>
+ALWAYS_INLINE bool MterpFieldAccessFast(Instruction* inst,
+ uint16_t inst_data,
+ ShadowFrame* shadow_frame,
+ Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ constexpr bool kIsStatic = (kAccessType & FindFieldFlags::StaticBit) != 0;
+
+ // Try to find the field in small thread-local cache first.
+ InterpreterCache* tls_cache = self->GetInterpreterCache();
+ size_t tls_value;
+ if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+ // The meaning of the cache value is opcode-specific.
+ // It is ArtField* for static fields and the raw offset for instance fields.
+ size_t offset = kIsStatic
+ ? reinterpret_cast<ArtField*>(tls_value)->GetOffset().SizeValue()
+ : tls_value;
+ if (kIsDebugBuild) {
+ uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
+ ArtField* field = FindFieldFromCode<kAccessType, /* access_checks */ false>(
+ field_idx, shadow_frame->GetMethod(), self, sizeof(PrimType));
+ DCHECK_EQ(offset, field->GetOffset().SizeValue());
+ }
+ ObjPtr<mirror::Object> obj = kIsStatic
+ ? reinterpret_cast<ArtField*>(tls_value)->GetDeclaringClass()
+ : MakeObjPtr(shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data)));
+ if (LIKELY(obj != nullptr)) {
+ MterpFieldAccess<PrimType, kAccessType>(
+ inst, inst_data, shadow_frame, obj, MemberOffset(offset), /* is_volatile */ false);
+ return true;
+ }
+ }
// This effectively inlines the fast path from ArtMethod::GetDexCache.
- // It avoids non-inlined call which in turn allows elimination of the prologue and epilogue.
+ ArtMethod* referrer = shadow_frame->GetMethod();
if (LIKELY(!referrer->IsObsolete())) {
// Avoid read barriers, since we need only the pointer to the native (non-movable)
// DexCache field array which we can get even through from-space objects.
ObjPtr<mirror::Class> klass = referrer->GetDeclaringClass<kWithoutReadBarrier>();
mirror::DexCache* dex_cache = klass->GetDexCache<kDefaultVerifyFlags, kWithoutReadBarrier>();
+
// Try to find the desired field in DexCache.
+ uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr & obj != nullptr)) {
- if (kIsDebugBuild) {
- // Compare the fast path and slow path.
- StackHandleScope<1> hs(self);
- HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(&obj)); // GC might move the object.
- DCHECK_EQ(field, (FindFieldFromCode<kType, /* access_checks */ false>(
+ if (LIKELY(field != nullptr)) {
+ bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
+ if (LIKELY(initialized)) {
+ DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks */ false>(
field_idx, referrer, self, sizeof(PrimType))));
+ ObjPtr<mirror::Object> obj = kIsStatic
+ ? field->GetDeclaringClass().Ptr()
+ : shadow_frame->GetVRegReference(inst->VRegB_22c(inst_data));
+ if (LIKELY(kIsStatic || obj != nullptr)) {
+ // Only non-volatile fields are allowed in the thread-local cache.
+ if (LIKELY(!field->IsVolatile())) {
+ if (kIsStatic) {
+ tls_cache->Set(inst, reinterpret_cast<uintptr_t>(field));
+ } else {
+ tls_cache->Set(inst, field->GetOffset().SizeValue());
+ }
+ }
+ MterpFieldAccess<PrimType, kAccessType>(
+ inst, inst_data, shadow_frame, obj, field->GetOffset(), field->IsVolatile());
+ return true;
+ }
}
- return Getter::Get(obj, field);
}
}
+
+ // Slow path. Last and with identical arguments so that it becomes a single-instruction tail call.
- return artGetInstanceFromMterp<PrimType, RetType, Getter, kType>(field_idx, obj, referrer, self);
+ return MterpFieldAccessSlow<PrimType, kAccessType>(inst, inst_data, shadow_frame, self);
}
-#define ART_GET_FIELD_FROM_MTERP(Suffix, Kind, PrimType, RetType, Ptr) \
-extern "C" RetType MterpIGet ## Suffix(uint32_t field_idx, \
- mirror::Object* obj, \
- ArtMethod* referrer, \
- Thread* self) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- struct Getter { /* Specialize the field load depending on the field type */ \
- static RetType Get(mirror::Object* o, ArtField* f) REQUIRES_SHARED(Locks::mutator_lock_) { \
- return f->Get##Kind(o)Ptr; \
- } \
- }; \
- return artGetInstanceFromMterpFast<PrimType, RetType, Getter>(field_idx, obj, referrer, self); \
-} \
-
-ART_GET_FIELD_FROM_MTERP(I8, Byte, int8_t, ssize_t, )
-ART_GET_FIELD_FROM_MTERP(U8, Boolean, uint8_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(I16, Short, int16_t, ssize_t, )
-ART_GET_FIELD_FROM_MTERP(U16, Char, uint16_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(U32, 32, uint32_t, size_t, )
-ART_GET_FIELD_FROM_MTERP(U64, 64, uint64_t, uint64_t, )
-ART_GET_FIELD_FROM_MTERP(Obj, Obj, mirror::HeapReference<mirror::Object>, mirror::Object*, .Ptr())
-
-#undef ART_GET_FIELD_FROM_MTERP
-
-extern "C" ssize_t MterpIPutU8(uint32_t field_idx,
- mirror::Object* obj,
- uint8_t new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->SetBoolean<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
+#define MTERP_FIELD_ACCESSOR(Name, PrimType, AccessType) \
+extern "C" bool Name(Instruction* inst, uint16_t inst_data, ShadowFrame* sf, Thread* self) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ return MterpFieldAccessFast<PrimType, AccessType>(inst, inst_data, sf, self); \
}
-extern "C" ssize_t MterpIPutI8(uint32_t field_idx,
- mirror::Object* obj,
- uint8_t new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->SetByte<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
+#define MTERP_FIELD_ACCESSORS_FOR_TYPE(Suffix, PrimType, Kind) \
+ MTERP_FIELD_ACCESSOR(MterpIGet##Suffix, PrimType, Instance##Kind##Read) \
+ MTERP_FIELD_ACCESSOR(MterpIPut##Suffix, PrimType, Instance##Kind##Write) \
+ MTERP_FIELD_ACCESSOR(MterpSGet##Suffix, PrimType, Static##Kind##Read) \
+ MTERP_FIELD_ACCESSOR(MterpSPut##Suffix, PrimType, Static##Kind##Write)
-extern "C" ssize_t MterpIPutU16(uint32_t field_idx,
- mirror::Object* obj,
- uint16_t new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->SetChar<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
+MTERP_FIELD_ACCESSORS_FOR_TYPE(I8, int8_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(U8, uint8_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(I16, int16_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(U16, uint16_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(U32, uint32_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(U64, uint64_t, Primitive)
+MTERP_FIELD_ACCESSORS_FOR_TYPE(Obj, uint32_t, Object)
-extern "C" ssize_t MterpIPutI16(uint32_t field_idx,
- mirror::Object* obj,
- uint16_t new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->SetShort<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
+// Check that the primitive type for the Obj variant above is correct.
+// It really must be a primitive type for the templates to compile.
+// In the case of objects, it is only used to get the field size.
+static_assert(kHeapReferenceSize == sizeof(uint32_t), "Unexpected kHeapReferenceSize");
-extern "C" ssize_t MterpIPutU32(uint32_t field_idx,
- mirror::Object* obj,
- uint32_t new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->Set32<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" ssize_t MterpIPutU64(uint32_t field_idx,
- mirror::Object* obj,
- uint64_t* new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->Set64<false>(obj, *new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-extern "C" ssize_t artSetObjInstanceFromMterp(uint32_t field_idx,
- mirror::Object* obj,
- mirror::Object* new_value,
- ArtMethod* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtField* field = referrer->GetDexCache()->GetResolvedField(field_idx, kRuntimePointerSize);
- if (LIKELY(field != nullptr && obj != nullptr)) {
- field->SetObj<false>(obj, new_value);
- return 0; // success
- }
- return -1; // failure
-}
-
-template <typename return_type, Primitive::Type primitive_type>
-ALWAYS_INLINE return_type MterpGetStatic(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self,
- return_type (ArtField::*func)(ObjPtr<mirror::Object>))
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return_type res = 0; // On exception, the result will be ignored.
- ArtField* f =
- FindFieldFromCode<StaticPrimitiveRead, false>(field_idx,
- referrer,
- self,
- primitive_type);
- if (LIKELY(f != nullptr)) {
- ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
- res = (f->*func)(obj);
- }
- return res;
-}
-
-extern "C" int32_t MterpSGetU8(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
- referrer,
- self,
- &ArtField::GetBoolean);
-}
-
-extern "C" int32_t MterpSGetI8(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<int8_t, Primitive::kPrimByte>(field_idx,
- referrer,
- self,
- &ArtField::GetByte);
-}
-
-extern "C" uint32_t MterpSGetU16(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
- referrer,
- self,
- &ArtField::GetChar);
-}
-
-extern "C" int32_t MterpSGetI16(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<int16_t, Primitive::kPrimShort>(field_idx,
- referrer,
- self,
- &ArtField::GetShort);
-}
-
-extern "C" mirror::Object* MterpSGetObj(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<ObjPtr<mirror::Object>, Primitive::kPrimNot>(field_idx,
- referrer,
- self,
- &ArtField::GetObject).Ptr();
-}
-
-extern "C" int32_t MterpSGetU32(uint32_t field_idx,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<int32_t, Primitive::kPrimInt>(field_idx,
- referrer,
- self,
- &ArtField::GetInt);
-}
-
-extern "C" int64_t MterpSGetU64(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpGetStatic<int64_t, Primitive::kPrimLong>(field_idx,
- referrer,
- self,
- &ArtField::GetLong);
-}
-
-
-template <typename field_type, Primitive::Type primitive_type>
-int MterpSetStatic(uint32_t field_idx,
- field_type new_value,
- ArtMethod* referrer,
- Thread* self,
- void (ArtField::*func)(ObjPtr<mirror::Object>, field_type val))
- REQUIRES_SHARED(Locks::mutator_lock_) {
- int res = 0; // Assume success (following quick_field_entrypoints conventions)
- ArtField* f =
- FindFieldFromCode<StaticPrimitiveWrite, false>(field_idx, referrer, self, primitive_type);
- if (LIKELY(f != nullptr)) {
- ObjPtr<mirror::Object> obj = f->GetDeclaringClass();
- (f->*func)(obj, new_value);
- } else {
- res = -1; // Failure
- }
- return res;
-}
-
-extern "C" int MterpSPutU8(uint32_t field_idx,
- uint8_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<uint8_t, Primitive::kPrimBoolean>(field_idx,
- new_value,
- referrer,
- self,
- &ArtField::SetBoolean<false>);
-}
-
-extern "C" int MterpSPutI8(uint32_t field_idx,
- int8_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<int8_t, Primitive::kPrimByte>(field_idx,
- new_value,
- referrer,
- self,
- &ArtField::SetByte<false>);
-}
-
-extern "C" int MterpSPutU16(uint32_t field_idx,
- uint16_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<uint16_t, Primitive::kPrimChar>(field_idx,
- new_value,
- referrer,
- self,
- &ArtField::SetChar<false>);
-}
-
-extern "C" int MterpSPutI16(uint32_t field_idx,
- int16_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<int16_t, Primitive::kPrimShort>(field_idx,
- new_value,
- referrer,
- self,
- &ArtField::SetShort<false>);
-}
-
-extern "C" int MterpSPutU32(uint32_t field_idx,
- int32_t new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<int32_t, Primitive::kPrimInt>(field_idx,
- new_value,
- referrer,
- self,
- &ArtField::SetInt<false>);
-}
-
-extern "C" int MterpSPutU64(uint32_t field_idx,
- int64_t* new_value,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return MterpSetStatic<int64_t, Primitive::kPrimLong>(field_idx,
- *new_value,
- referrer,
- self,
- &ArtField::SetLong<false>);
-}
+#undef MTERP_FIELD_ACCESSORS_FOR_TYPE
+#undef MTERP_FIELD_ACCESSOR
extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr,
int32_t index)
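
The rewrite above replaces the per-type MterpIGet/MterpIPut/MterpSGet/MterpSPut entry points
with the templated MterpFieldAccessFast/MterpFieldAccessSlow pair and adds a fast path that
consults the thread-local InterpreterCache keyed by the instruction pointer: the cached value
is an ArtField* for static fields and the raw field offset for instance fields, and volatile
fields are never cached. A standalone sketch of that direct-mapped, pointer-keyed cache shape
follows; it is illustrative only, and its size, hashing, and field names are assumptions, not
ART's InterpreterCache:

    // Toy direct-mapped cache keyed by instruction pointer, storing an opcode-specific
    // payload (e.g. a field offset). Misses fall back to slow resolution, which then
    // repopulates the cache only for the cacheable (non-volatile) case.
    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct TinyInterpreterCache {
      static constexpr size_t kSize = 256;  // power of two so masking works as a cheap hash
      struct Entry { const void* key = nullptr; size_t value = 0; };
      std::array<Entry, kSize> entries;

      static size_t IndexOf(const void* key) {
        // Dex instructions are 2-byte aligned, so drop the low bit before masking.
        return (reinterpret_cast<uintptr_t>(key) >> 1) & (kSize - 1);
      }
      bool Get(const void* key, size_t* value) const {
        const Entry& e = entries[IndexOf(key)];
        if (e.key != key) {
          return false;  // miss: caller resolves the field the slow way
        }
        *value = e.value;
        return true;
      }
      void Set(const void* key, size_t value) {
        entries[IndexOf(key)] = Entry{key, value};
      }
    };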
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index 1a56d26..81a53c8 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -25,8 +25,6 @@
*/
extern "C" void* artMterpAsmInstructionStart[];
extern "C" void* artMterpAsmInstructionEnd[];
-extern "C" void* artMterpAsmAltInstructionStart[];
-extern "C" void* artMterpAsmAltInstructionEnd[];
namespace art {
@@ -50,6 +48,8 @@
// Set true to enable poison testing of ExportPC. Uses Alt interpreter.
constexpr bool kTestExportPC = false;
+constexpr size_t kMterpHandlerSize = 128;
+
} // namespace interpreter
} // namespace art
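
The new kMterpHandlerSize constant gives a name to the 128-byte slot that every computed-goto
handler occupies (the generated tables use `.balign 128` between handlers), and
CheckMterpAsmConstants() in mterp.cc now validates the table against it. A minimal sketch of
that invariant, with kNumPackedOpcodes shown as an assumed example value:

    // The handler table is valid only if it is exactly one fixed-size slot per opcode.
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kMterpHandlerSize = 128;   // from mterp.h above
    constexpr size_t kNumPackedOpcodes = 256;   // example value for illustration

    constexpr bool HandlerTableSizeLooksValid(uintptr_t start, uintptr_t end) {
      return (end - start) == kNumPackedOpcodes * kMterpHandlerSize;
    }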
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
deleted file mode 100644
index 394a849..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ /dev/null
@@ -1,12529 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'arm'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: arm/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM EABI general notes:
-
-r0-r3 hold first 4 args to a method; they are not preserved across method calls
-r4-r8 are available for general use
-r9 is given special treatment in some situations, but not for us
-r10 (sl) seems to be generally available
-r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
-r12 (ip) is scratch -- not preserved across method calls
-r13 (sp) should be managed carefully in case a signal arrives
-r14 (lr) must be preserved
-r15 (pc) can be tinkered with directly
-
-r0 holds returns of <= 4 bytes
-r0-r1 hold returns of 8 bytes, low word in r0
-
-Callee must save/restore r4+ (except r12) if it modifies them. If VFP
-is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
-s0-s15 (d0-d7, q0-a3) do not need to be.
-
-Stack is "full descending". Only the arguments that don't fit in the first 4
-registers are placed on the stack. "sp" points at the first stacked argument
-(i.e. the 5th arg).
-
-VFP: single-precision results in s0, double-precision results in d0.
-
-In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
-64-bit quantities (long long, double) must be 64-bit aligned.
-*/
-
-/*
-Mterp and ARM notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- r4 rPC interpreted program counter, used for fetching instructions
- r5 rFP interpreted frame pointer, used for accessing locals and args
- r6 rSELF self (Thread) pointer
- r7 rINST first 16-bit code unit of current instruction
- r8 rIBASE interpreted instruction base pointer, used for computed goto
- r10 rPROFILE branch profiling countdown
- r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC r4
-#define CFI_DEX 4 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define rFP r5
-#define rSELF r6
-#define rINST r7
-#define rIBASE r8
-#define rPROFILE r10
-#define rREFS r11
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
-.endm
-
-.macro EXPORT_DEX_PC tmp
- ldr \tmp, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
- sub \tmp, rPC, \tmp
- asr \tmp, #1
- str \tmp, [rFP, #OFF_FP_DEX_PC]
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- ldrh rINST, [rPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh rINST, [rPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to rPC and rINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh rINST, [rPC, #((\count)*2)]
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- add rPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg. Updates
- * rPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- *
- * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
- * bits that hold the shift distance are used for the half/byte/sign flags.
- * In some cases we can pre-double _reg for free, so we require a byte offset
- * here.
- */
-.macro FETCH_ADVANCE_INST_RB reg
- ldrh rINST, [rPC, \reg]!
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [rPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [rPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [rPC, #((\count)*2+(\byte))]
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Because this only jumps within the
- * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
- */
-.macro GOTO_OPCODE reg
- add pc, rIBASE, \reg, lsl #7
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add pc, \base, \reg, lsl #7
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [rFP, \vreg, lsl #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [rFP, \vreg, lsl #2]
- mov \reg, #0
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [rFP, \vreg, lsl #2]
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-.macro SET_VREG_SHADOW reg, vreg
- str \reg, [rREFS, \vreg, lsl #2]
-.endm
-
-/*
- * Clear the corresponding shadow regs for a vreg pair
- */
-.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
- mov \tmp1, #0
- add \tmp2, \vreg, #1
- SET_VREG_SHADOW \tmp1, \vreg
- SET_VREG_SHADOW \tmp1, \tmp2
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .arm
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
-.endm
-
-.macro END name
- .fnend
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-/* File: arm/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
-
-/*
- * On entry:
- * r0 Thread* self/
- * r1 insns_
- * r2 ShadowFrame
- * r3 JValue* result_register
- *
- */
-
-ENTRY ExecuteMterpImpl
- stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r3, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r6, 12
- .cfi_rel_offset r7, 16
- .cfi_rel_offset r8, 20
- .cfi_rel_offset r9, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset fp, 32
- .cfi_rel_offset lr, 36
-
- /* Remember the return register */
- str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov rSELF, r0
- ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
- VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
- ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
- add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- bl MterpSetUpHotnessCountdown
- mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST @ load rINST from rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
- /* NOTE: no fallthrough */
-
-/* File: arm/instruction_start.S */
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: arm/op_nop.S */
- FETCH_ADVANCE_INST 1 @ advance to next instr, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- GOTO_OPCODE ip @ execute it
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: arm/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: arm/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: arm/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: arm/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: arm/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 1 @ r3<- BBBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: arm/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH r3, 2 @ r3<- BBBB
- FETCH r2, 1 @ r2<- AAAA
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
- VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: arm/op_move_object.S */
-/* File: arm/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: arm/op_move_object_from16.S */
-/* File: arm/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH r1, 1 @ r1<- BBBB
- mov r0, rINST, lsr #8 @ r0<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[AA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: arm/op_move_object_16.S */
-/* File: arm/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH r1, 2 @ r1<- BBBB
- FETCH r0, 1 @ r0<- AAAA
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[BBBB]
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r2, r0 @ fp[AAAA]<- r2
- .else
- SET_VREG r2, r0 @ fp[AAAA]<- r2
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: arm/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 0
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: arm/op_move_result_wide.S */
- /* move-result-wide vAA */
- mov rINST, rINST, lsr #8 @ rINST<- AA
- ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- retval.j
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: arm/op_move_result_object.S */
-/* File: arm/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r0, [rFP, #OFF_FP_RESULT_REGISTER] @ get pointer to result JType.
- ldr r0, [r0] @ r0 <- result.i.
- GET_INST_OPCODE ip @ extract opcode from rINST
- .if 1
- SET_VREG_OBJECT r0, r2, r1 @ fp[AA]<- r0
- .else
- SET_VREG r0, r2 @ fp[AA]<- r0
- .endif
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: arm/op_move_exception.S */
- /* move-exception vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r1, #0 @ r1<- 0
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- SET_VREG_OBJECT r3, r2 @ fp[AA]<- exception obj
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ clear exception
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: arm/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: arm/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: arm/op_return_wide.S */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: arm/op_return_object.S */
-/* File: arm/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA
- mov r1, #0
- b MterpReturn
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: arm/op_const_4.S */
- /* const/4 vA, #+B */
- sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
- ubfx r0, rINST, #8, #4 @ r0<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- SET_VREG r1, r0 @ fp[A]<- r1
- GOTO_OPCODE ip @ execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: arm/op_const_16.S */
- /* const/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: arm/op_const.S */
- /* const vAA, #+BBBBbbbb */
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r3 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: arm/op_const_high16.S */
- /* const/high16 vAA, #+BBBB0000 */
- FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, r0, lsl #16 @ r0<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r3 @ vAA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: arm/op_const_wide_16.S */
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r1, r0, asr #31 @ r1<- ssssssss
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: arm/op_const_wide_32.S */
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH r0, 1 @ r0<- 0000bbbb (low)
- mov r3, rINST, lsr #8 @ r3<- AA
- FETCH_S r2, 2 @ r2<- ssssBBBB (high)
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
- CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- mov r1, r0, asr #31 @ r1<- ssssssss
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: arm/op_const_wide.S */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r1, 2 @ r1<- BBBB (low middle)
- FETCH r2, 3 @ r2<- hhhh (high middle)
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
- FETCH r3, 4 @ r3<- HHHH (high)
- mov r9, rINST, lsr #8 @ r9<- AA
- orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
- CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
- FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: arm/op_const_wide_high16.S */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
- mov r3, rINST, lsr #8 @ r3<- AA
- mov r0, #0 @ r0<- 00000000
- mov r1, r1, lsl #16 @ r1<- BBBB0000
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: arm/op_const_string.S */
-/* File: arm/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstString
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: arm/op_const_string_jumbo.S */
- /* const/string vAA, string@BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (low)
- FETCH r2, 2 @ r2<- BBBB (high)
- mov r1, rINST, lsr #8 @ r1<- AA
- orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 3 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: arm/op_const_class.S */
-/* File: arm/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: arm/op_monitor_enter.S */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artLockObjectFromCode
- cmp r0, #0
- bne MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: arm/op_monitor_exit.S */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r0, r2 @ r0<- vAA (object)
- mov r1, rSELF @ r1<- self
- bl artUnlockObjectFromCode @ r0<- success for unlock(self, obj)
- cmp r0, #0 @ failed?
- bne MterpException
- FETCH_ADVANCE_INST 1 @ before throw: advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: arm/op_check_cast.S */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpCheckCast @ (index, &obj, method, self)
- PREFETCH_INST 2
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: arm/op_instance_of.S */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- FETCH r0, 1 @ r0<- CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
- mov r3, rSELF @ r3<- self
- bl MterpInstanceOf @ (index, &obj, method, self)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r9, rINST, #8, #4 @ r9<- A
- PREFETCH_INST 2
- cmp r1, #0 @ exception pending?
- bne MterpException
- ADVANCE 2 @ advance rPC
- SET_VREG r0, r9 @ vA<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: arm/op_array_length.S */
- /*
- * Return the length of an array.
- */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r0, r1 @ r0<- vB (object ref)
- cmp r0, #0 @ is object null?
- beq common_errNullObject @ yup, fail
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r3, r2 @ vA<- length
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: arm/op_new_instance.S */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rSELF
- mov r2, rINST
- bl MterpNewInstance @ (shadow_frame, self, inst_data)
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: arm/op_new_array.S */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: arm/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl MterpFilledNewArray
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: arm/op_filled_new_array_range.S */
-/* File: arm/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rSELF
- bl MterpFilledNewArrayRange
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: arm/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
- GET_VREG r0, r3 @ r0<- vAA (array object)
- add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
- bl MterpFillArrayData @ (obj, payload)
- cmp r0, #0 @ 0 means an exception is thrown
- beq MterpPossibleException @ exception?
- FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: arm/op_throw.S */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- mov r2, rINST, lsr #8 @ r2<- AA
- GET_VREG r1, r2 @ r1<- vAA (exception object)
- cmp r1, #0 @ null object?
- beq common_errNullObject @ yes, throw an NPE instead
- str r1, [rSELF, #THREAD_EXCEPTION_OFFSET] @ thread->exception<- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: arm/op_goto.S */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: arm/op_goto_16.S */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: arm/op_goto_32.S */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH r0, 1 @ r0<- aaaa (lo)
- FETCH r3, 2 @ r3<- AAAA (hi)
- orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa
- b MterpCommonTakenBranch
-
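The goto variants above all hand a signed code-unit offset to the common branch helper; since Dalvik code units are 16 bits wide, the byte displacement is twice that offset. A minimal C sketch of that arithmetic (illustrative only; the function name and types are not taken from this file):

    #include <stdint.h>

    /* Advancing the Dalvik PC by a signed code-unit offset: pointer arithmetic
     * on a 16-bit code-unit pointer doubles the offset in byte terms. */
    static const uint16_t *advance_pc(const uint16_t *pc, int32_t code_unit_offset) {
        return pc + code_unit_offset;
    }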
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: arm/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl MterpDoPackedSwitch @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: arm/op_sparse_switch.S */
-/* File: arm/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH r0, 1 @ r0<- bbbb (lo)
- FETCH r1, 2 @ r1<- BBBB (hi)
- mov r3, rINST, lsr #8 @ r3<- AA
- orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
- GET_VREG r1, r3 @ r1<- vAA
- add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
- bl MterpDoSparseSwitch @ r0<- code-unit branch offset
- movs rINST, r0
- b MterpCommonTakenBranch
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: arm/op_cmpl_float.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: arm/op_cmpg_float.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
- vcmpe.f32 s0, s1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
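The cmpl-float and cmpg-float handlers above differ only in the value preloaded into r0 before the VFP flags are inspected, which is what gives them their opposite bias for unordered (NaN) operands. A compact C sketch of the intended semantics (illustrative; these helper names are not part of this file):

    /* cmpl-float: an unordered comparison (NaN operand) yields -1. */
    static int cmpl_float(float x, float y) {
        if (x > y) return 1;
        if (x == y) return 0;
        return -1;   /* x < y, or either operand is NaN */
    }

    /* cmpg-float: an unordered comparison (NaN operand) yields 1. */
    static int cmpg_float(float x, float y) {
        if (x < y) return -1;
        if (x == y) return 0;
        return 1;    /* x > y, or either operand is NaN */
    }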
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: arm/op_cmpl_double.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x > y) {
- * return 1;
- * } else if (x < y) {
- * return -1;
- * } else {
- * return -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mvn r0, #0 @ r0<- -1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- movgt r0, #1 @ (greater than) r0<- 1
- moveq r0, #0 @ (equal) r0<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: arm/op_cmpg_double.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return 1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
- vcmpe.f64 d0, d1 @ compare (vBB, vCC)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, #1 @ r0<- 1 (default)
- GET_INST_OPCODE ip @ extract opcode from rINST
- fmstat @ export status flags
- mvnmi r0, #0 @ (less than) r0<- -1
- moveq r0, #0 @ (equal) r0<- 0
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: arm/op_cmp_long.S */
- /*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- cmp r0, r2
- sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
- mov ip, #0
- mvnlt ip, #0 @ -1
- cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
- orrne ip, #1
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG ip, r9 @ vAA<- ip
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
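The sbcs/cmpeq pair above derives a full signed 64-bit comparison from two 32-bit compares without materializing the subtraction result. In plain C the handler computes the equivalent of the following (illustrative sketch, standard types in place of the interpreter's vreg accessors):

    #include <stdint.h>

    /* cmp-long: -1 if x < y, 0 if x == y, 1 if x > y (signed 64-bit compare). */
    static int cmp_long(int64_t x, int64_t y) {
        if (x < y) return -1;
        if (x > y) return 1;
        return 0;
    }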
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: arm/op_if_eq.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- beq MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: arm/op_if_ne.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bne MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: arm/op_if_lt.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- blt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: arm/op_if_ge.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bge MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: arm/op_if_gt.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- bgt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: arm/op_if_le.S */
-/* File: arm/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r0, rINST, #8, #4 @ r0<- A
- GET_VREG r3, r1 @ r3<- vB
- GET_VREG r0, r0 @ r0<- vA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, r3 @ compare (vA, vB)
- ble MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: arm/op_if_eqz.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- beq MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: arm/op_if_nez.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- bne MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: arm/op_if_ltz.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- blt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: arm/op_if_gez.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- bge MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: arm/op_if_gtz.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- bgt MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: arm/op_if_lez.S */
-/* File: arm/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- mov r0, rINST, lsr #8 @ r0<- AA
- GET_VREG r0, r0 @ r0<- vAA
- FETCH_S rINST, 1 @ rINST<- branch offset, in code units
- cmp r0, #0 @ compare (vAA, 0)
- ble MterpCommonTakenBranchNoFlags
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: arm/op_unused_3e.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: arm/op_unused_3f.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: arm/op_unused_40.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: arm/op_unused_41.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: arm/op_unused_42.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: arm/op_unused_43.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: arm/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldr r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
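The single unsigned compare-and-branch (cmp/bcs) in the handler above rejects negative indices as well as indices at or beyond the array length, and the element address is simply the array object plus the fixed data offset plus index times the element width. A rough C sketch of that fast path (names and the error value are illustrative, not taken from this file):

    #include <stdint.h>

    /* aget fast path: one unsigned comparison covers both bounds checks. */
    static int32_t aget_int(const int32_t *array_data, uint32_t length, int32_t index) {
        if ((uint32_t)index >= length) {
            return 0;   /* the interpreter instead branches to common_errArrayIndex */
        }
        return array_data[index];
    }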
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: arm/op_aget_wide.S */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: arm/op_aget_object.S */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- bl artAGetObjectFromMterp @ (array, index)
- ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
- PREFETCH_INST 2
- cmp r1, #0
- bne MterpException
- SET_VREG_OBJECT r0, r9
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: arm/op_aget_boolean.S */
-/* File: arm/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: arm/op_aget_byte.S */
-/* File: arm/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrsb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: arm/op_aget_char.S */
-/* File: arm/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: arm/op_aget_short.S */
-/* File: arm/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldrsh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ r2<- vBB[vCC]
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r2, r9 @ vAA<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: arm/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- str r2, [r0, #MIRROR_INT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: arm/op_aput_wide.S */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
- GET_INST_OPCODE ip @ extract opcode from rINST
- strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: arm/op_aput_object.S */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpAputObject
- cmp r0, #0
- beq MterpPossibleException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: arm/op_aput_boolean.S */
-/* File: arm/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strb r2, [r0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: arm/op_aput_byte.S */
-/* File: arm/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strb r2, [r0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: arm/op_aput_char.S */
-/* File: arm/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strh r2, [r0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: arm/op_aput_short.S */
-/* File: arm/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B r2, 1, 0 @ r2<- BB
- mov r9, rINST, lsr #8 @ r9<- AA
- FETCH_B r3, 1, 1 @ r3<- CC
- GET_VREG r0, r2 @ r0<- vBB (array object)
- GET_VREG r1, r3 @ r1<- vCC (requested index)
- cmp r0, #0 @ null array object?
- beq common_errNullObject @ yes, bail
- ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
- add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
- cmp r1, r3 @ compare unsigned index, length
- bcs common_errArrayIndex @ index >= length, bail
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_VREG r2, r9 @ r2<- vAA
- GET_INST_OPCODE ip @ extract opcode from rINST
- strh r2, [r0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetU32
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 0
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: arm/op_iget_wide.S */
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetU64
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpException @ bail out
- CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: arm/op_iget_object.S */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetObj
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 1
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: arm/op_iget_boolean.S */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetU8
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 0
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: arm/op_iget_byte.S */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetI8
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 0
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: arm/op_iget_char.S */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetU16
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 0
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: arm/op_iget_short.S */
-/* File: arm/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- referrer
- mov r3, rSELF @ r3<- self
- bl MterpIGetI16
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- .if 0
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- .else
- SET_VREG r0, r2 @ fp[A]<- r0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: arm/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU32
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU32
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: arm/op_iput_wide.S */
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU64
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: arm/op_iput_object.S */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpIPutObj
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: arm/op_iput_boolean.S */
-/* File: arm/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU8
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU8
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: arm/op_iput_byte.S */
-/* File: arm/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI8
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutI8
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: arm/op_iput_char.S */
-/* File: arm/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU16
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU16
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: arm/op_iput_short.S */
-/* File: arm/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI16
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref CCCC
- mov r1, rINST, lsr #12 @ r1<- B
- GET_VREG r1, r1 @ r1<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- GET_VREG r2, r2 @ r2<- fp[A]
- ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer
- PREFETCH_INST 2
- bl MterpIPutI16
- cmp r0, #0
- bne MterpPossibleException
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetU32
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetU32
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 0
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: arm/op_sget_wide.S */
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field@BBBB */
-
- .extern MterpSGetU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetU64
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r9, rINST, lsr #8 @ r9<- AA
- VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA]
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- CLEAR_SHADOW_PAIR r9, r2, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ vAA/vAA+1<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: arm/op_sget_object.S */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetObj
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetObj
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 1
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: arm/op_sget_boolean.S */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetU8
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetU8
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 0
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: arm/op_sget_byte.S */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetI8
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetI8
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 0
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: arm/op_sget_char.S */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetU16
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetU16
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 0
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: arm/op_sget_short.S */
-/* File: arm/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
-
- .extern MterpSGetI16
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- ldr r1, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpSGetI16
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- mov r2, rINST, lsr #8 @ r2<- AA
- PREFETCH_INST 2
- cmp r3, #0 @ Fail to resolve?
- bne MterpException @ bail out
-.if 0
- SET_VREG_OBJECT r0, r2 @ fp[AA]<- r0
-.else
- SET_VREG r0, r2 @ fp[AA]<- r0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: arm/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutU32
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: arm/op_sput_wide.S */
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- VREG_INDEX_TO_ADDR r1, r1
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutU64
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: arm/op_sput_object.S */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- mov r3, rSELF
- bl MterpSPutObj
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: arm/op_sput_boolean.S */
-/* File: arm/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutU8
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: arm/op_sput_byte.S */
-/* File: arm/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutI8
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: arm/op_sput_char.S */
-/* File: arm/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutU16
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: arm/op_sput_short.S */
-/* File: arm/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC
- FETCH r0, 1 @ r0<- field ref BBBB
- mov r3, rINST, lsr #8 @ r3<- AA
- GET_VREG r1, r3 @ r1<- fp[AA]
- ldr r2, [rFP, #OFF_FP_METHOD]
- mov r3, rSELF
- PREFETCH_INST 2 @ Get next inst, but don't advance rPC
- bl MterpSPutI16
- cmp r0, #0 @ 0 on success, -1 on failure
- bne MterpException
- ADVANCE 2 @ Past exception point - now advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: arm/op_invoke_virtual.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtual
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: arm/op_invoke_super.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeSuper
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: arm/op_invoke_direct.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeDirect
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: arm/op_invoke_static.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeStatic
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: arm/op_invoke_interface.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeInterface
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: arm/op_return_void_no_barrier.S */
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- mov r0, rSELF
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- blne MterpSuspendCheck @ (self)
- mov r0, #0
- mov r1, #0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: arm/op_invoke_virtual_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: arm/op_invoke_super_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeSuperRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: arm/op_invoke_direct_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeDirectRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: arm/op_invoke_static_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeStaticRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: arm/op_invoke_interface_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeInterfaceRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: arm/op_unused_79.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: arm/op_unused_7a.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: arm/op_neg_int.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- rsb r0, r0, #0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
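
For orientation, the unop.S template above boils down to: decode B from bits 15:12 and A from bits 11:8 of the instruction word, apply one instruction to vB, and store the result in vA. A rough C sketch of that pattern for neg-int follows; the vreg array and the function name are illustrative, not ART's actual data structures.

#include <stdint.h>

/* Illustrative register file; ART's shadow frame layout is more involved. */
static uint32_t vreg[16];

/* neg-int vA, vB -- the unop.S expansion whose "instr" is "rsb r0, r0, #0". */
static void neg_int(uint16_t inst)
{
    uint32_t b = (inst >> 12) & 0xfu;  /* mov r3, rINST, lsr #12   */
    uint32_t a = (inst >> 8) & 0xfu;   /* ubfx r9, rINST, #8, #4   */
    uint32_t v = vreg[b];              /* GET_VREG r0, r3          */
    vreg[a] = 0u - v;                  /* rsb r0, r0, #0; SET_VREG */
}
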
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: arm/op_not_int.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mvn r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: arm/op_neg_long.S */
-/* File: arm/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- rsbs r0, r0, #0 @ optional op; may set condition codes
- rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-
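
Here the template's instruction pair is rsbs/rsc, i.e. a 64-bit negation carried out on two 32-bit halves with a borrow between them. A hedged C sketch of that arithmetic (helper name and half-word layout are illustrative):

#include <stdint.h>

/* 64-bit negate expressed as the two 32-bit steps performed by rsbs/rsc. */
static void neg_long_halves(uint32_t lo, uint32_t hi,
                            uint32_t *out_lo, uint32_t *out_hi)
{
    *out_lo = 0u - lo;                        /* rsbs r0, r0, #0 (sets borrow) */
    *out_hi = 0u - hi - (lo != 0u ? 1u : 0u); /* rsc  r1, r1, #0 (uses borrow) */
    /* This is the two's-complement negation of the 64-bit value hi:lo. */
}
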
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: arm/op_not_long.S */
-/* File: arm/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mvn r0, r0 @ optional op; may set condition codes
- mvn r1, r1 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: arm/op_neg_float.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
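
Note that neg-float is done entirely in integer registers: adding 0x80000000 flips the IEEE-754 sign bit, so no VFP instruction is needed. A small C illustration of the same bit trick (illustrative, not ART code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Flip the IEEE-754 sign bit of a float through its raw 32-bit pattern. */
static float neg_float_bits(float f)
{
    uint32_t bits;
    memcpy(&bits, &f, sizeof bits);  /* reinterpret, no numeric conversion */
    bits += 0x80000000u;             /* same effect as XORing bit 31       */
    memcpy(&f, &bits, sizeof f);
    return f;
}

int main(void)
{
    printf("%f\n", neg_float_bits(1.5f));  /* prints -1.500000 */
    return 0;
}
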
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: arm/op_neg_double.S */
-/* File: arm/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: arm/op_int_to_long.S */
-/* File: arm/unopWider.S */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-
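
The widening template produces the high word with "asr #31", i.e. the sign bit replicated across 32 bits. A brief C sketch of that split (names are illustrative):

#include <stdint.h>

/* int-to-long: low word is the value, high word is the sign spread over 32 bits. */
static void int_to_long(int32_t v, uint32_t *out_lo, uint32_t *out_hi)
{
    *out_lo = (uint32_t)v;                 /* r0 keeps the value                 */
    *out_hi = (v < 0) ? 0xffffffffu : 0u;  /* what "mov r1, r0, asr #31" yields  */
}
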
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: arm/op_int_to_float.S */
-/* File: arm/funop.S */
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitos s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s1, [r9] @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: arm/op_int_to_double.S */
-/* File: arm/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fsitod d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: arm/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: arm/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B from 15:12
- ubfx r0, rINST, #8, #4 @ r0<- A from 11:8
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- GET_VREG r2, r1 @ r2<- fp[B]
- GET_INST_OPCODE ip @ ip<- opcode from rINST
- .if 0
- SET_VREG_OBJECT r2, r0 @ fp[A]<- r2
- .else
- SET_VREG r2, r0 @ fp[A]<- r2
- .endif
- GOTO_OPCODE ip @ execute next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: arm/op_long_to_float.S */
-/* File: arm/unopNarrower.S */
- /*
- * Generic 64bit-to-32bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0/r1", where
- * "result" is a 32-bit quantity in r0.
- *
- * For: long-to-float, double-to-int, double-to-float
- *
- * (This would work for long-to-int, but that instruction is actually
- * an exact match for op_move.)
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_l2f @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: arm/op_long_to_double.S */
- /*
- * Specialised 64-bit floating point operation.
- *
- * Note: The result will be returned in d2.
- *
- * For: long-to-double
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- vldr d0, [r3] @ d0<- vAA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- vcvt.f64.s32 d1, s1 @ d1<- (double)(vAAh)
- vcvt.f64.u32 d2, s0 @ d2<- (double)(vAAl)
- vldr d3, constvalop_long_to_double
- vmla.f64 d2, d1, d3 @ d2<- vAAh*2^32 + vAAl
-
- GET_INST_OPCODE ip @ extract opcode from rINST
- vstr.64 d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
- /* literal pool helper */
-constvalop_long_to_double:
- .8byte 0x41f0000000000000
-
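
The handler above converts the two halves of the long separately (the high word as signed, the low word as unsigned) and recombines them with vmla; the literal 0x41f0000000000000 is simply 2^32 encoded as an IEEE-754 double. A C sketch of the same decomposition (illustrative only, not ART code):

#include <stdint.h>
#include <stdio.h>

/* long-to-double via the same split the handler uses with vcvt/vmla. */
static double long_to_double_split(int64_t v)
{
    int32_t  hi = (int32_t)(v >> 32);  /* signed high word (arithmetic shift) */
    uint32_t lo = (uint32_t)v;         /* unsigned low word                   */
    double two_pow_32 = 4294967296.0;  /* the 0x41f0000000000000 literal      */
    return (double)hi * two_pow_32 + (double)lo;  /* vmla.f64 d2, d1, d3      */
}

int main(void)
{
    printf("%f\n", long_to_double_split(-3));         /* -3.000000            */
    printf("%f\n", long_to_double_split(1LL << 40));  /* 1099511627776.000000 */
    return 0;
}
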
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: arm/op_float_to_int.S */
-/* File: arm/funop.S */
- /*
- * Generic 32-bit unary floating-point operation. Provide an "instr"
- * line that specifies an instruction that performs "s1 = op s0".
- *
- * for: int-to-float, float-to-int
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizs s1, s0 @ s1<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s1, [r9] @ vA<- s1
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: arm/op_float_to_long.S */
-/* File: arm/unopWider.S */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = op r0", where
- * "result" is a 64-bit quantity in r0/r1.
- *
- * For: int-to-long, int-to-double, float-to-long, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- GET_VREG r0, r3 @ r0<- vB
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- @ optional op; may set condition codes
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- bl f2l_doconv @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 9-10 instructions */
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: arm/op_float_to_double.S */
-/* File: arm/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f64.f32 d0, s0 @ d0<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: arm/op_double_to_int.S */
-/* File: arm/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- ftosizd s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s0, [r9] @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: arm/op_double_to_long.S */
-/* File: arm/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0/r1".
- * This could be an ARM instruction or a function call.
- *
- * For: neg-long, not-long, neg-double, long-to-double, double-to-long
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl d2l_doconv @ r0/r1<- op, r2-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-11 instructions */
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: arm/op_double_to_float.S */
-/* File: arm/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit unary floating point operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: double-to-int, double-to-float
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
- ubfx r9, rINST, #8, #4 @ r9<- A
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- vcvt.f32.f64 s0, d0 @ s0<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fsts s0, [r9] @ vA<- s0
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: arm/op_int_to_byte.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- sxtb r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: arm/op_int_to_char.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- uxth r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: arm/op_int_to_short.S */
-/* File: arm/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op r0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r3 @ r0<- vB
- @ optional op; may set condition codes
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- sxth r0, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: arm/op_add_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
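
The binop.S template reads AA from the instruction word and BB/CC from the next code unit, fetches both operands, optionally rejects a zero divisor, applies a single instruction, and writes vAA. A rough C rendering of that shape for add-int (the vreg array and names are illustrative):

#include <stdint.h>

static uint32_t vreg[256];   /* illustrative register file */

/* add-int vAA, vBB, vCC -- binop.S with instr "add r0, r0, r1" and chkzero=0. */
static void add_int(uint16_t inst, uint16_t ccbb)
{
    uint32_t aa = inst >> 8;          /* mov r9, rINST, lsr #8 */
    uint32_t bb = ccbb & 0xffu;       /* and r2, r0, #255      */
    uint32_t cc = ccbb >> 8;          /* mov r3, r0, lsr #8    */
    vreg[aa] = vreg[bb] + vreg[cc];   /* wraps mod 2^32, like the ARM add */
}
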
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: arm/op_sub_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- sub r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: arm/op_mul_int.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: arm/op_div_int.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
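
div-int rejects a zero divisor up front and then divides either with the sdiv instruction (when __ARM_ARCH_EXT_IDIV__ is defined) or via the EABI helper __aeabi_idiv. A hedged C sketch of that path follows; throw_div_by_zero is a hypothetical stand-in for the branch to common_errDivideByZero:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stub standing in for the branch to common_errDivideByZero. */
static void throw_div_by_zero(void)
{
    /* The real interpreter pends an ArithmeticException here. */
}

/* div-int: divide vBB by vCC once the divisor check has passed. */
static bool div_int(int32_t vbb, int32_t vcc, int32_t *out)
{
    if (vcc == 0) {                       /* cmp r1, #0 ; beq common_errDivideByZero */
        throw_div_by_zero();
        return false;
    }
    if (vbb == INT32_MIN && vcc == -1) {  /* sdiv/__aeabi_idiv truncate this case */
        *out = INT32_MIN;
        return true;
    }
    *out = vbb / vcc;                     /* sdiv r0, r0, r1 or bl __aeabi_idiv */
    return true;
}
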
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: arm/op_rem_int.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int
- *
- */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op, r0-r2 changed
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
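
rem-int keeps the remainder instead of the quotient: with hardware divide it computes q = a/b with sdiv and then a - q*b with mls, and without it __aeabi_idivmod returns the quotient in r0 and the remainder in r1. A small C sketch of the sdiv+mls identity (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* rem-int: remainder via the same "a - (a/b)*b" identity as the sdiv+mls pair. */
static bool rem_int(int32_t a, int32_t b, int32_t *out)
{
    if (b == 0) {
        return false;        /* the handler branches to common_errDivideByZero */
    }
    if (a == INT32_MIN && b == -1) {
        *out = 0;            /* quotient overflows; the remainder is 0         */
        return true;
    }
    int32_t q = a / b;       /* sdiv r2, r0, r1                                */
    *out = a - q * b;        /* mls  r1, r1, r2, r0  (r1 <- r0 - r1*r2)        */
    return true;
}
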
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: arm/op_and_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: arm/op_or_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: arm/op_xor_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: arm/op_shl_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
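
The shift variants mask the count with "and r1, r1, #31" because Dalvik, like Java, uses only the low five bits of a 32-bit shift count. A one-line C equivalent (illustrative):

#include <stdint.h>

/* shl-int: the shift count is masked to 0..31 before shifting, as in Java. */
static uint32_t shl_int(uint32_t value, uint32_t count)
{
    return value << (count & 31u);   /* and r1, r1, #31 ; mov r0, r0, asl r1 */
}
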
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: arm/op_shr_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: arm/op_ushr_int.S */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: arm/op_add_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- adds r0, r0, r2 @ optional op; may set condition codes
- adc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
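add-long above is the binopWide template instantiated with adds/adc: the 64-bit sum is built from two 32-bit additions, with the carry out of the low word fed into the high word. A small C sketch of that carry propagation (a standalone helper, not the interpreter's API):

    #include <stdint.h>

    /* 64-bit add from 32-bit halves, mirroring "adds r0, r0, r2; adc r1, r1, r3". */
    static void add64(uint32_t *lo, uint32_t *hi, uint32_t lo2, uint32_t hi2) {
        uint32_t old_lo = *lo;
        *lo = old_lo + lo2;
        *hi = *hi + hi2 + (*lo < old_lo);   /* carry = unsigned overflow of the low word */
    }

sub-long below is the same shape with subs/sbc, propagating a borrow instead of a carry.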
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: arm/op_sub_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- subs r0, r0, r2 @ optional op; may set condition codes
- sbc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: arm/op_mul_long.S */
- /*
- * Signed 64-bit integer multiply.
- *
- * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
- * WX
- * x YZ
- * --------
- * ZW ZX
- * YW YX
- *
- * The low word of the result holds ZX, the high word holds
- * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
- * it doesn't fit in the low 64 bits.
- *
- * Unlike most ARM math operations, multiply instructions have
- * restrictions on using the same register more than once (Rd and Rm
- * cannot be the same).
- */
- /* mul-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST, lsr #8 @ r0<- AA
- add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
- CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
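The comment block above describes the partial-product trick: for {W:X} x {Y:Z}, only ZxX (all 64 bits of it) plus the low 32 bits of ZxW and YxX can reach the low 64 bits of the result, so YxW is never computed. A C sketch of the same computation (a standalone helper, not ART code):

    #include <stdint.h>

    /* Low 64 bits of {w:x} * {y:z}, built from 32-bit partial products. */
    static uint64_t mul64_lo(uint32_t x, uint32_t w, uint32_t z, uint32_t y) {
        uint64_t zx = (uint64_t)z * x;   /* umull: full 64-bit low product       */
        uint32_t zw = z * w;             /* mul:   only the low 32 bits are kept */
        uint32_t yx = y * x;             /* mla:   only the low 32 bits are kept */
        uint32_t hi = (uint32_t)(zx >> 32) + zw + yx;
        return ((uint64_t)hi << 32) | (uint32_t)zx;
    }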
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: arm/op_div_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: arm/op_rem_long.S */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
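div-long and rem-long are the binopWide template with chkzero enabled; both call the EABI helper __aeabi_ldivmod, which returns the quotient in r0/r1 and the remainder in r2/r3, so div-long stores the first pair and rem-long the second. A C sketch of the check-then-divide shape (plain C division standing in for the helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns false where the handler would branch to common_errDivideByZero. */
    static bool div_rem_long(int64_t vbb, int64_t vcc, int64_t *quot, int64_t *rem) {
        if (vcc == 0) {
            return false;
        }
        *quot = vbb / vcc;    /* __aeabi_ldivmod: r0/r1 */
        *rem  = vbb % vcc;    /* __aeabi_ldivmod: r2/r3 */
        return true;
    }

Note that plain C leaves INT64_MIN / -1 undefined, whereas the handlers rely on the runtime helper's behaviour for that case.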
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: arm/op_and_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- and r0, r0, r2 @ optional op; may set condition codes
- and r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: arm/op_or_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- orr r0, r0, r2 @ optional op; may set condition codes
- orr r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: arm/op_xor_long.S */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- eor r0, r0, r2 @ optional op; may set condition codes
- eor r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: arm/op_shl_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: arm/op_shr_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r0 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: arm/op_ushr_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- and r3, r0, #255 @ r3<- BB
- mov r0, r0, lsr #8 @ r0<- CC
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
- GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- and r2, r2, #63 @ r2<- r2 & 0x3f
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
- mov r0, r0, lsr r2 @ r0<- r0 >>> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
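The three long-shift handlers mask the distance to six bits and then assemble a 64-bit shift from two 32-bit shifts: below a distance of 32 the bits that spill out of one half are OR-ed into the other half via a (32 - dist) shift, and at 32 or more the conditional movpl path takes over with a (dist - 32) shift. A C sketch of ushr-long under those rules (helper name is illustrative):

    #include <stdint.h>

    /* ushr-long: {hi:lo} >>> dist, using only the low 6 bits of the distance. */
    static void ushr_long(uint32_t *lo, uint32_t *hi, uint32_t vcc) {
        uint32_t dist = vcc & 63;
        uint32_t l = *lo, h = *hi;
        if (dist == 0) {
            return;                                  /* avoid a 32-bit shift by 32 in C */
        }
        if (dist < 32) {
            *lo = (l >> dist) | (h << (32 - dist));  /* spill high-word bits into the low word */
            *hi = h >> dist;
        } else {
            *lo = h >> (dist - 32);                  /* the movpl path */
            *hi = 0;
        }
    }

shr-long is the same with arithmetic shifts (the high word sign-fills), and shl-long mirrors it in the other direction.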
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: arm/op_add_float.S */
-/* File: arm/fbinop.S */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fadds s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
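The single-precision handlers load vBB and vCC straight into VFP registers, operate, and store s2 back into vAA: a virtual register is just a 32-bit slot whose bits are reinterpreted as a float. A C sketch of that reinterpretation for add-float (memcpy used as the portable type-pun; an illustration, not the interpreter's code):

    #include <stdint.h>
    #include <string.h>

    /* add-float vAA, vBB, vCC on raw 32-bit register slots. */
    static uint32_t add_float_bits(uint32_t vbb, uint32_t vcc) {
        float a, b, r;
        uint32_t out;
        memcpy(&a, &vbb, sizeof a);     /* flds s0, [&vBB] */
        memcpy(&b, &vcc, sizeof b);     /* flds s1, [&vCC] */
        r = a + b;                      /* fadds s2, s0, s1 */
        memcpy(&out, &r, sizeof out);   /* fsts s2, [&vAA] */
        return out;
    }

The double-precision handlers further below do the same on a register pair, which is why they also clear the shadow pair for the destination.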
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: arm/op_sub_float.S */
-/* File: arm/fbinop.S */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fsubs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: arm/op_mul_float.S */
-/* File: arm/fbinop.S */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fmuls s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: arm/op_div_float.S */
-/* File: arm/fbinop.S */
- /*
- * Generic 32-bit floating-point operation. Provide an "instr" line that
- * specifies an instruction that performs "s2 = s0 op s1". Because we
- * use the "softfp" ABI, this must be an instruction, not a function call.
- *
- * For: add-float, sub-float, mul-float, div-float
- */
- /* floatop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fdivs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: arm/op_rem_float.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- GET_VREG r1, r3 @ r1<- vCC
- GET_VREG r0, r2 @ r0<- vBB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmodf @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 11-14 instructions */
-
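Because the EABI has no float-remainder helper, rem-float reuses the generic binop template but calls libm's fmodf (rem-double further below does the same with fmod on a register pair). fmodf/fmod return a remainder with the sign of the dividend, which matches Dalvik's rem semantics. A minimal C sketch (link with -lm):

    #include <math.h>

    /* rem-float vAA, vBB, vCC: the "bl fmodf" in the handler. */
    static float rem_float(float vbb, float vcc) {
        return fmodf(vbb, vcc);
    }

    /* rem-double is the same shape on 64-bit values. */
    static double rem_double(double vbb, double vcc) {
        return fmod(vbb, vcc);
    }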
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: arm/op_add_double.S */
-/* File: arm/fbinopWide.S */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- faddd d2, d0, d1 @ d2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: arm/op_sub_double.S */
-/* File: arm/fbinopWide.S */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fsubd d2, d0, d1 @ d2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: arm/op_mul_double.S */
-/* File: arm/fbinopWide.S */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fmuld d2, d0, d1 @ d2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: arm/op_div_double.S */
-/* File: arm/fbinopWide.S */
- /*
- * Generic 64-bit double-precision floating point binary operation.
- * Provide an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * for: add-double, sub-double, mul-double, div-double
- */
- /* doubleop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
- mov r3, r0, lsr #8 @ r3<- CC
- and r2, r0, #255 @ r2<- BB
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- fdivd d2, d0, d1 @ d2<- op
- CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: arm/op_rem_double.S */
-/* EABI doesn't define a double remainder function, but libm does */
-/* File: arm/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH r0, 1 @ r0<- CCBB
- mov rINST, rINST, lsr #8 @ rINST<- AA
- and r2, r0, #255 @ r2<- BB
- mov r3, r0, lsr #8 @ r3<- CC
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
- VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 14-17 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: arm/op_add_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
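The /2addr handlers above and below decode both operands from the first 16-bit code unit: A sits in bits 8-11 (and names both the destination and the first source) and B in bits 12-15, so there is no second code unit to fetch as there is for the vAA, vBB, vCC forms. A C sketch of the two decodings (format names 23x and 12x follow the Dalvik instruction-format naming):

    #include <stdint.h>

    /* 23x (binop vAA, vBB, vCC): AA in inst[15:8], BB/CC in the next code unit. */
    static void decode_23x(uint16_t inst, uint16_t ccbb, int *aa, int *bb, int *cc) {
        *aa = inst >> 8;            /* mov r9, rINST, lsr #8 */
        *bb = ccbb & 0xff;          /* and r2, r0, #255      */
        *cc = ccbb >> 8;            /* mov r3, r0, lsr #8    */
    }

    /* 12x (binop/2addr vA, vB): A in inst[11:8], B in inst[15:12]. */
    static void decode_12x(uint16_t inst, int *a, int *b) {
        *a = (inst >> 8) & 0xf;     /* ubfx r9, rINST, #8, #4 */
        *b = inst >> 12;            /* mov r3, rINST, lsr #12 */
    }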
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: arm/op_sub_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- sub r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: arm/op_mul_int_2addr.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: arm/op_div_int_2addr.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
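div-int/2addr always performs the zero check and then either issues a hardware sdiv (when __ARM_ARCH_EXT_IDIV__ is defined) or calls the EABI helper __aeabi_idiv. A C sketch of the same shape (the compiler makes the sdiv-versus-helper choice for the division below):

    #include <stdbool.h>
    #include <stdint.h>

    /* div-int/2addr: vA = vA / vB, with an explicit divide-by-zero check. */
    static bool div_int_2addr(int32_t *va, int32_t vb) {
        if (vb == 0) {
            return false;          /* handler branches to common_errDivideByZero */
        }
        *va = *va / vb;            /* sdiv, or a call to __aeabi_idiv without IDIV */
        return true;
    }

Plain C leaves INT32_MIN / -1 undefined; the handlers rely on sdiv or the helper producing INT32_MIN there, as the generic binop comments note.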
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: arm/op_rem_int_2addr.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/2addr
- *
- */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
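rem-int/2addr keeps the same zero check but derives the remainder: with hardware divide it computes the quotient with sdiv and then a - q*b with mls; without it, __aeabi_idivmod returns the quotient in r0 and the remainder in r1, which is why the result is taken from r1. A C sketch of the sdiv + mls path:

    #include <stdbool.h>
    #include <stdint.h>

    /* rem-int/2addr: vA = vA % vB, written as a - (a / b) * b like sdiv + mls. */
    static bool rem_int_2addr(int32_t *va, int32_t vb) {
        if (vb == 0) {
            return false;              /* divide-by-zero -> ArithmeticException path */
        }
        int32_t a = *va;
        int32_t q = a / vb;            /* sdiv r2, r0, r1                  */
        *va = a - q * vb;              /* mls  r1, r1, r2, r0 = r0 - r1*r2 */
        return true;
    }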
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: arm/op_and_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: arm/op_or_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: arm/op_xor_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: arm/op_shl_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: arm/op_shr_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: arm/op_ushr_int_2addr.S */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- and r1, r1, #31 @ optional op; may set condition codes
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: arm/op_add_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- adds r0, r0, r2 @ optional op; may set condition codes
- adc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: arm/op_sub_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- subs r0, r0, r2 @ optional op; may set condition codes
- sbc r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: arm/op_mul_long_2addr.S */
- /*
- * Signed 64-bit integer multiply, "/2addr" version.
- *
- * See op_mul_long for an explanation.
- *
- * We get a little tight on registers, so to avoid looking up &fp[A]
- * again we stuff it into rINST.
- */
- /* mul-long/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
- mul ip, r2, r1 @ ip<- ZxW
- umull r1, lr, r2, r0 @ r1/lr <- ZxX
- mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
- mov r0, rINST @ r0<- &fp[A] (free up rINST)
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: arm/op_div_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: arm/op_rem_long_2addr.S */
-/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 1
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl __aeabi_ldivmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: arm/op_and_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- and r0, r0, r2 @ optional op; may set condition codes
- and r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: arm/op_or_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- orr r0, r0, r2 @ optional op; may set condition codes
- orr r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: arm/op_xor_long_2addr.S */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- eor r0, r0, r2 @ optional op; may set condition codes
- eor r1, r1, r3 @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: arm/op_shl_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r1, r1, asl r2 @ r1<- r1 << r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
- mov r0, r0, asl r2 @ r0<- r0 << r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: arm/op_shr_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r0 >> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
- mov r1, r1, asr r2 @ r1<- r1 >> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm/op_ushr_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r2, r3 @ r2<- vB
- CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
- and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- mov r0, r0, lsr r2 @ r0<- r0 >>> r2
- rsb r3, r2, #32 @ r3<- 32 - r2
- orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
- subs ip, r2, #32 @ ip<- r2 - 32
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
- mov r1, r1, lsr r2 @ r1<- r1 >>> r2
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: arm/op_add_float_2addr.S */
-/* File: arm/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fadds s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: arm/op_sub_float_2addr.S */
-/* File: arm/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fsubs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: arm/op_mul_float_2addr.S */
-/* File: arm/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fmuls s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: arm/op_div_float_2addr.S */
-/* File: arm/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
- fdivs s2, s0, s1 @ s2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: arm/op_rem_float_2addr.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r1, r3 @ r1<- vB
- GET_VREG r0, r9 @ r0<- vA
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
- @ optional op; may set condition codes
- bl fmodf @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: arm/op_add_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- faddd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: arm/op_sub_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fsubd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: arm/op_mul_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fmuld d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: arm/op_div_double_2addr.S */
-/* File: arm/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "d2 = d0 op d1".
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
- VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
- fdivd d2, d0, d1 @ d2<- op
- GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: arm/op_rem_double_2addr.S */
-/* EABI doesn't define a double remainder function, but libm does */
-/* File: arm/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0-r1 op r2-r3".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr,
- * rem-double/2addr
- */
- /* binop/2addr vA, vB */
- mov r1, rINST, lsr #12 @ r1<- B
- ubfx rINST, rINST, #8, #4 @ rINST<- A
- VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
- VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
- .if 0
- orrs ip, r2, r3 @ second arg (r2-r3) is zero?
- beq common_errDivideByZero
- .endif
- CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
- FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- @ optional op; may set condition codes
- bl fmod @ result<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 12-15 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: arm/op_add_int_lit16.S */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- add r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: arm/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- rsb r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: arm/op_mul_int_lit16.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: arm/op_div_int_lit16.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: arm/op_rem_int_lit16.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit16
- *
- */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
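
In the rem-int handler just above, the hardware-division path derives the remainder from the truncated quotient: `sdiv r2, r0, r1` followed by `mls r1, r1, r2, r0` computes r0 - r1*r2, i.e. dividend - divisor * (dividend / divisor). The same identity in C (for illustration; C's `/` truncates toward zero the way sdiv does):

#include <assert.h>

/* Remainder from the truncated quotient, as sdiv + mls compute it:
 * rem = dividend - divisor * (dividend / divisor). */
static int rem_via_mls(int dividend, int divisor) {
    int quotient = dividend / divisor;       /* sdiv r2, r0, r1 */
    return dividend - divisor * quotient;    /* mls  r1, r1, r2, r0 */
}

int main(void) {
    assert(rem_via_mls(7, 3) == 1);
    assert(rem_via_mls(-7, 3) == -1);        /* truncation toward zero */
    assert(rem_via_mls(7, -3) == 1);
    return 0;
}
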
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: arm/op_and_int_lit16.S */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- and r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: arm/op_or_int_lit16.S */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- orr r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: arm/op_xor_int_lit16.S */
-/* File: arm/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
- mov r2, rINST, lsr #12 @ r2<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
- GET_VREG r0, r2 @ r0<- vB
- .if 0
- cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- eor r0, r0, r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: arm/op_add_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- add r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
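
Each of the /lit8 handlers decodes the second code unit the same way: it is laid out as ssssCCBB, so the low byte BB selects the source register and the high byte CC is the sign-extended 8-bit literal (`and r2, r3, #255` and `asr r1, r3, #8`, or the shift folded directly into the op). A small C sketch of that decoding (illustrative only, not ART source):

#include <stdint.h>
#include <stdio.h>

/* Decode the second code unit of a *-int/lit8 instruction (ssssCCBB):
 * low byte BB is the source register index, high byte CC is a signed
 * 8-bit literal. Mirrors "and r2, r3, #255" / "asr r1, r3, #8". */
static void decode_lit8(uint16_t code_unit, unsigned *vreg_bb, int *literal_cc) {
    *vreg_bb = code_unit & 0xff;                 /* BB */
    int cc = (code_unit >> 8) & 0xff;            /* CC as 0..255 */
    *literal_cc = (cc ^ 0x80) - 0x80;            /* sign-extend to -128..127 */
}

int main(void) {
    unsigned bb;
    int cc;
    decode_lit8(0xff05u, &bb, &cc);
    printf("vBB=%u literal=%d\n", bb, cc);       /* vBB=5 literal=-1 */
    return 0;
}
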
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm/op_rsub_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- rsb r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: arm/op_mul_int_lit8.S */
-/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- asr r1, r3, #8 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mul r0, r1, r0 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: arm/op_div_int_lit8.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r0 = r0 div r1". The selection between sdiv and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * div-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r0, r0, r1 @ r0<- op
-#else
- bl __aeabi_idiv @ r0<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: arm/op_rem_int_lit8.S */
- /*
- * Specialized 32-bit binary operation
- *
- * Performs "r1 = r0 rem r1". The selection between the sdiv block and the gcc helper
- * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
- * ARMv7 CPUs that have hardware division support).
- *
- * NOTE: idivmod returns quotient in r0 and remainder in r1
- *
- * rem-int/lit8
- *
- */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
-#ifdef __ARM_ARCH_EXT_IDIV__
- sdiv r2, r0, r1
- mls r1, r1, r2, r0 @ r1<- op
-#else
- bl __aeabi_idivmod @ r1<- op, r0-r3 changed
-#endif
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r1, r9 @ vAA<- r1
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: arm/op_and_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- and r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: arm/op_or_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- orr r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: arm/op_xor_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- eor r0, r0, r3, asr #8 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: arm/op_shl_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: arm/op_shr_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm/op_ushr_int_lit8.S */
-/* File: arm/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from r3 to r1 is not the default "asr r1, r3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (r1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
- mov r9, rINST, lsr #8 @ r9<- AA
- and r2, r3, #255 @ r2<- BB
- GET_VREG r0, r2 @ r0<- vBB
- ubfx r1, r3, #8, #5 @ optional; typically r1<- ssssssCC (sign extended)
- .if 0
- @cmp r1, #0 @ is second operand zero?
- beq common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
- mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
- GET_INST_OPCODE ip @ extract opcode from rINST
- SET_VREG r0, r9 @ vAA<- r0
- GOTO_OPCODE ip @ jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: arm/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldr r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
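
The *-quick field accessors in this stretch skip field resolution entirely: the instruction already carries the field's byte offset, so the handler only null-checks the object and then loads or stores at object + offset with a width-appropriate instruction (ldr, ldrb, ldrsb, ldrh, ldrsh and the matching stores). Roughly, in C (a sketch of the access pattern only; the names below are stand-ins, not ART's object model):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Rough shape of an iget-quick: null-check, then read a 32-bit field at a
 * known byte offset. "throw_null_pointer" is an illustrative stand-in for
 * the interpreter's common_errNullObject path. */
static int32_t iget_quick_sketch(const void *obj, uint32_t field_offset,
                                 void (*throw_null_pointer)(void)) {
    if (obj == NULL) {                   /* cmp r3, #0 ; beq common_errNullObject */
        throw_null_pointer();
        return 0;
    }
    int32_t value;                       /* ldr r0, [r3, r1] */
    memcpy(&value, (const char *)obj + field_offset, sizeof(value));
    return value;
}

static void no_throw(void) {}

int main(void) {
    struct example { int32_t a; int32_t b; } e = { 7, 42 };
    return iget_quick_sketch(&e, offsetof(struct example, b), no_throw) == 42 ? 0 : 1;
}
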
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: arm/op_iget_wide_quick.S */
- /* iget-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH ip, 1 @ ip<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
- CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
- GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: arm/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- EXPORT_PC
- GET_VREG r0, r2 @ r0<- object we're operating on
- bl artIGetObjectFromMterp @ (obj, offset)
- ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx r2, rINST, #8, #4 @ r2<- A
- PREFETCH_INST 2
- cmp r3, #0
- bne MterpPossibleException @ bail out
- SET_VREG_OBJECT r0, r2 @ fp[A]<- r0
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: arm/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- str r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: arm/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r3, 1 @ r3<- field byte offset
- GET_VREG r2, r2 @ r2<- fp[B], the object pointer
- ubfx r0, rINST, #8, #4 @ r0<- A
- cmp r2, #0 @ check object for null
- beq common_errNullObject @ object was null
- VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
- ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strd r0, [r2, r3] @ obj.field<- r0/r1
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: arm/op_iput_object_quick.S */
- EXPORT_PC
- add r0, rFP, #OFF_FP_SHADOWFRAME
- mov r1, rPC
- mov r2, rINST
- bl MterpIputObjectQuick
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm/op_invoke_virtual_quick.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualQuick
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm/op_invoke_virtual_range_quick.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeVirtualQuickRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: arm/op_iput_boolean_quick.S */
-/* File: arm/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strb r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: arm/op_iput_byte_quick.S */
-/* File: arm/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strb r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: arm/op_iput_char_quick.S */
-/* File: arm/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strh r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: arm/op_iput_short_quick.S */
-/* File: arm/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- fp[B], the object pointer
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- GET_VREG r0, r2 @ r0<- fp[A]
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- strh r0, [r3, r1] @ obj.field<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: arm/op_iget_boolean_quick.S */
-/* File: arm/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrb r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: arm/op_iget_byte_quick.S */
-/* File: arm/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsb r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: arm/op_iget_char_quick.S */
-/* File: arm/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrh r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: arm/op_iget_short_quick.S */
-/* File: arm/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- mov r2, rINST, lsr #12 @ r2<- B
- FETCH r1, 1 @ r1<- field byte offset
- GET_VREG r3, r2 @ r3<- object we're operating on
- ubfx r2, rINST, #8, #4 @ r2<- A
- cmp r3, #0 @ check object for null
- beq common_errNullObject @ object was null
- ldrsh r0, [r3, r1] @ r0<- obj.field
- FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- SET_VREG r0, r2 @ fp[A]<- r0
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: arm/op_unused_f3.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: arm/op_unused_f4.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: arm/op_unused_f5.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: arm/op_unused_f6.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: arm/op_unused_f7.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: arm/op_unused_f8.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: arm/op_unused_f9.S */
-/* File: arm/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: arm/op_invoke_polymorphic.S */
-/* File: arm/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokePolymorphic
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm/op_invoke_polymorphic_range.S */
-/* File: arm/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokePolymorphicRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: arm/op_invoke_custom.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeCustom
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle an invoke-custom invocation.
- *
- * for: invoke-custom, invoke-custom/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, call_site@BBBB */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, call_site@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: arm/op_invoke_custom_range.S */
-/* File: arm/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- mov r3, rINST
- bl MterpInvokeCustomRange
- cmp r0, #0
- beq MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: arm/op_const_method_handle.S */
-/* File: arm/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstMethodHandle @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: arm/op_const_method_type.S */
-/* File: arm/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- FETCH r0, 1 @ r0<- BBBB
- mov r1, rINST, lsr #8 @ r1<- AA
- add r2, rFP, #OFF_FP_SHADOWFRAME
- mov r3, rSELF
- bl MterpConstMethodType @ (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 @ load rINST
- cmp r0, #0 @ fail?
- bne MterpPossibleException @ let reference interpreter deal with it.
- ADVANCE 2 @ advance rPC
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-
- .balign 128
-/* File: arm/instruction_end.S */
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: arm/instruction_start_sister.S */
-
- .type artMterpAsmSisterStart, #object
- .hidden artMterpAsmSisterStart
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
-
-
-/* continuation for op_float_to_long */
-/*
- * Convert the float in r0 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-f2l_doconv:
- ubfx r2, r0, #23, #8 @ grab the exponent
- cmp r2, #0xbe @ out of long range (or NaN/Inf)?
- bhs f2l_special_cases
- b __aeabi_f2lz @ tail call to convert float to long
-f2l_special_cases:
- cmp r2, #0xff @ NaN or infinity?
- beq f2l_maybeNaN
-f2l_notNaN:
- adds r0, r0, r0 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if sign bit set
- bx lr @ return
-f2l_maybeNaN:
- lsls r3, r0, #9
- beq f2l_notNaN @ fraction == 0 means Inf; fall through for NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-
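
f2l_doconv exists because __aeabi_f2lz does not clip: the handler inspects the exponent field (ubfx of bits 23..30) to detect values at or beyond 2^63, sends NaN to 0, clamps overflow to the most positive or most negative long, and only tail-calls the EABI routine for in-range inputs. The double-to-long continuation below does the same with the double's 11-bit exponent. The intended result, written as plain C (an illustration of the semantics, not of the bit-level method used above):

#include <stdint.h>
#include <math.h>
#include <stdio.h>

/* Float-to-long with the handler's special cases: NaN -> 0, values at or
 * beyond +/-2^63 clamp to INT64_MAX / INT64_MIN, everything else converts
 * normally. */
static int64_t float_to_long_clamped(float x) {
    if (isnan(x)) {
        return 0;
    }
    if (x >= 9223372036854775808.0f) {       /* >= 2^63 */
        return INT64_MAX;
    }
    if (x <= -9223372036854775808.0f) {      /* <= -2^63 (INT64_MIN exactly) */
        return INT64_MIN;
    }
    return (int64_t)x;                       /* in range: ordinary conversion */
}

int main(void) {
    printf("%lld %lld %lld\n",
           (long long)float_to_long_clamped(NAN),
           (long long)float_to_long_clamped(1e30f),
           (long long)float_to_long_clamped(-1.5f));  /* 0 9223372036854775807 -1 */
    return 0;
}
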
-/* continuation for op_double_to_long */
-/*
- * Convert the double in r0/r1 to a long in r0/r1.
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us.
- */
-d2l_doconv:
- ubfx r2, r1, #20, #11 @ grab the exponent
- movw r3, #0x43e
- cmp r2, r3 @ out of long range (or NaN/Inf)?
- bhs d2l_special_cases
- b __aeabi_d2lz @ tail call to convert double to long
-d2l_special_cases:
- movw r3, #0x7ff
- cmp r2, r3
- beq d2l_maybeNaN @ NaN?
-d2l_notNaN:
- adds r1, r1, r1 @ sign bit to carry
- mov r0, #0xffffffff @ assume maxlong for lsw
- mov r1, #0x7fffffff @ assume maxlong for msw
- adc r0, r0, #0
- adc r1, r1, #0 @ convert maxlong to minlong if sign bit set
- bx lr @ return
-d2l_maybeNaN:
- orrs r3, r0, r1, lsl #12
- beq d2l_notNaN @ fraction == 0 means Inf; fall through for NaN
- mov r0, #0
- mov r1, #0
- bx lr @ return 0 for NaN
-/* File: arm/instruction_end_sister.S */
-
- .type artMterpAsmSisterEnd, #object
- .hidden artMterpAsmSisterEnd
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: arm/instruction_start_alt.S */
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_nop
- sub lr, lr, #(.L_ALT_op_nop - .L_op_nop) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
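
Each of these alt stubs follows the same pattern: the adr/sub pair collapses to a constant, leaving lr pointing at the primary handler (ALT - (ALT - primary) = primary), so when the tail-called MterpCheckBefore returns, execution resumes in the regular handler. A C sketch of that control flow, with the tail call modeled as a plain call followed by an explicit jump to lr (all names here are illustrative stand-ins, not ART symbols):

#include <stdio.h>

/* Control-flow sketch of an alt stub: compute the primary handler's
 * address into "lr", run the check hook, then continue at the primary
 * handler when the hook returns. */
typedef void (*handler_fn)(void);

static void primary_op_nop(void) { puts("primary handler runs"); }
static void check_before(void)   { puts("check hook runs first"); }

static void alt_op_nop(void) {
    handler_fn lr = primary_op_nop;   /* adr lr, ALT ; sub lr, lr, #(ALT - primary) */
    check_before();                   /* b MterpCheckBefore (tail call) */
    lr();                             /* the hook's return lands at lr */
}

int main(void) {
    alt_op_nop();
    return 0;
}
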
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move
- sub lr, lr, #(.L_ALT_op_move - .L_op_move) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_from16
- sub lr, lr, #(.L_ALT_op_move_from16 - .L_op_move_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_16
- sub lr, lr, #(.L_ALT_op_move_16 - .L_op_move_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide
- sub lr, lr, #(.L_ALT_op_move_wide - .L_op_move_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide_from16
- sub lr, lr, #(.L_ALT_op_move_wide_from16 - .L_op_move_wide_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_wide_16
- sub lr, lr, #(.L_ALT_op_move_wide_16 - .L_op_move_wide_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object
- sub lr, lr, #(.L_ALT_op_move_object - .L_op_move_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object_from16
- sub lr, lr, #(.L_ALT_op_move_object_from16 - .L_op_move_object_from16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_object_16
- sub lr, lr, #(.L_ALT_op_move_object_16 - .L_op_move_object_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result
- sub lr, lr, #(.L_ALT_op_move_result - .L_op_move_result) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result_wide
- sub lr, lr, #(.L_ALT_op_move_result_wide - .L_op_move_result_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_result_object
- sub lr, lr, #(.L_ALT_op_move_result_object - .L_op_move_result_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_move_exception
- sub lr, lr, #(.L_ALT_op_move_exception - .L_op_move_exception) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_void
- sub lr, lr, #(.L_ALT_op_return_void - .L_op_return_void) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return
- sub lr, lr, #(.L_ALT_op_return - .L_op_return) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_wide
- sub lr, lr, #(.L_ALT_op_return_wide - .L_op_return_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_object
- sub lr, lr, #(.L_ALT_op_return_object - .L_op_return_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_4
- sub lr, lr, #(.L_ALT_op_const_4 - .L_op_const_4) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_16
- sub lr, lr, #(.L_ALT_op_const_16 - .L_op_const_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const
- sub lr, lr, #(.L_ALT_op_const - .L_op_const) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_high16
- sub lr, lr, #(.L_ALT_op_const_high16 - .L_op_const_high16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_16
- sub lr, lr, #(.L_ALT_op_const_wide_16 - .L_op_const_wide_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_32
- sub lr, lr, #(.L_ALT_op_const_wide_32 - .L_op_const_wide_32) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide
- sub lr, lr, #(.L_ALT_op_const_wide - .L_op_const_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_wide_high16
- sub lr, lr, #(.L_ALT_op_const_wide_high16 - .L_op_const_wide_high16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_string
- sub lr, lr, #(.L_ALT_op_const_string - .L_op_const_string) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_string_jumbo
- sub lr, lr, #(.L_ALT_op_const_string_jumbo - .L_op_const_string_jumbo) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_class
- sub lr, lr, #(.L_ALT_op_const_class - .L_op_const_class) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_monitor_enter
- sub lr, lr, #(.L_ALT_op_monitor_enter - .L_op_monitor_enter) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_monitor_exit
- sub lr, lr, #(.L_ALT_op_monitor_exit - .L_op_monitor_exit) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_check_cast
- sub lr, lr, #(.L_ALT_op_check_cast - .L_op_check_cast) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_instance_of
- sub lr, lr, #(.L_ALT_op_instance_of - .L_op_instance_of) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_array_length
- sub lr, lr, #(.L_ALT_op_array_length - .L_op_array_length) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_new_instance
- sub lr, lr, #(.L_ALT_op_new_instance - .L_op_new_instance) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_new_array
- sub lr, lr, #(.L_ALT_op_new_array - .L_op_new_array) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_filled_new_array
- sub lr, lr, #(.L_ALT_op_filled_new_array - .L_op_filled_new_array) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_filled_new_array_range
- sub lr, lr, #(.L_ALT_op_filled_new_array_range - .L_op_filled_new_array_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_fill_array_data
- sub lr, lr, #(.L_ALT_op_fill_array_data - .L_op_fill_array_data) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_throw
- sub lr, lr, #(.L_ALT_op_throw - .L_op_throw) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto
- sub lr, lr, #(.L_ALT_op_goto - .L_op_goto) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto_16
- sub lr, lr, #(.L_ALT_op_goto_16 - .L_op_goto_16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_goto_32
- sub lr, lr, #(.L_ALT_op_goto_32 - .L_op_goto_32) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_packed_switch
- sub lr, lr, #(.L_ALT_op_packed_switch - .L_op_packed_switch) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sparse_switch
- sub lr, lr, #(.L_ALT_op_sparse_switch - .L_op_sparse_switch) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpl_float
- sub lr, lr, #(.L_ALT_op_cmpl_float - .L_op_cmpl_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpg_float
- sub lr, lr, #(.L_ALT_op_cmpg_float - .L_op_cmpg_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpl_double
- sub lr, lr, #(.L_ALT_op_cmpl_double - .L_op_cmpl_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmpg_double
- sub lr, lr, #(.L_ALT_op_cmpg_double - .L_op_cmpg_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_cmp_long
- sub lr, lr, #(.L_ALT_op_cmp_long - .L_op_cmp_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_eq
- sub lr, lr, #(.L_ALT_op_if_eq - .L_op_if_eq) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ne
- sub lr, lr, #(.L_ALT_op_if_ne - .L_op_if_ne) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_lt
- sub lr, lr, #(.L_ALT_op_if_lt - .L_op_if_lt) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ge
- sub lr, lr, #(.L_ALT_op_if_ge - .L_op_if_ge) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gt
- sub lr, lr, #(.L_ALT_op_if_gt - .L_op_if_gt) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_le
- sub lr, lr, #(.L_ALT_op_if_le - .L_op_if_le) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_eqz
- sub lr, lr, #(.L_ALT_op_if_eqz - .L_op_if_eqz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_nez
- sub lr, lr, #(.L_ALT_op_if_nez - .L_op_if_nez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_ltz
- sub lr, lr, #(.L_ALT_op_if_ltz - .L_op_if_ltz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gez
- sub lr, lr, #(.L_ALT_op_if_gez - .L_op_if_gez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_gtz
- sub lr, lr, #(.L_ALT_op_if_gtz - .L_op_if_gtz) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_if_lez
- sub lr, lr, #(.L_ALT_op_if_lez - .L_op_if_lez) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_3e
- sub lr, lr, #(.L_ALT_op_unused_3e - .L_op_unused_3e) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_3f
- sub lr, lr, #(.L_ALT_op_unused_3f - .L_op_unused_3f) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_40
- sub lr, lr, #(.L_ALT_op_unused_40 - .L_op_unused_40) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_41
- sub lr, lr, #(.L_ALT_op_unused_41 - .L_op_unused_41) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_42
- sub lr, lr, #(.L_ALT_op_unused_42 - .L_op_unused_42) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_43
- sub lr, lr, #(.L_ALT_op_unused_43 - .L_op_unused_43) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget
- sub lr, lr, #(.L_ALT_op_aget - .L_op_aget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_wide
- sub lr, lr, #(.L_ALT_op_aget_wide - .L_op_aget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_object
- sub lr, lr, #(.L_ALT_op_aget_object - .L_op_aget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_boolean
- sub lr, lr, #(.L_ALT_op_aget_boolean - .L_op_aget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_byte
- sub lr, lr, #(.L_ALT_op_aget_byte - .L_op_aget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_char
- sub lr, lr, #(.L_ALT_op_aget_char - .L_op_aget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aget_short
- sub lr, lr, #(.L_ALT_op_aget_short - .L_op_aget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput
- sub lr, lr, #(.L_ALT_op_aput - .L_op_aput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_wide
- sub lr, lr, #(.L_ALT_op_aput_wide - .L_op_aput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_object
- sub lr, lr, #(.L_ALT_op_aput_object - .L_op_aput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_boolean
- sub lr, lr, #(.L_ALT_op_aput_boolean - .L_op_aput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_byte
- sub lr, lr, #(.L_ALT_op_aput_byte - .L_op_aput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_char
- sub lr, lr, #(.L_ALT_op_aput_char - .L_op_aput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_aput_short
- sub lr, lr, #(.L_ALT_op_aput_short - .L_op_aput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget
- sub lr, lr, #(.L_ALT_op_iget - .L_op_iget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_wide
- sub lr, lr, #(.L_ALT_op_iget_wide - .L_op_iget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_object
- sub lr, lr, #(.L_ALT_op_iget_object - .L_op_iget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_boolean
- sub lr, lr, #(.L_ALT_op_iget_boolean - .L_op_iget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_byte
- sub lr, lr, #(.L_ALT_op_iget_byte - .L_op_iget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_char
- sub lr, lr, #(.L_ALT_op_iget_char - .L_op_iget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_short
- sub lr, lr, #(.L_ALT_op_iget_short - .L_op_iget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput
- sub lr, lr, #(.L_ALT_op_iput - .L_op_iput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_wide
- sub lr, lr, #(.L_ALT_op_iput_wide - .L_op_iput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_object
- sub lr, lr, #(.L_ALT_op_iput_object - .L_op_iput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_boolean
- sub lr, lr, #(.L_ALT_op_iput_boolean - .L_op_iput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_byte
- sub lr, lr, #(.L_ALT_op_iput_byte - .L_op_iput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_char
- sub lr, lr, #(.L_ALT_op_iput_char - .L_op_iput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_short
- sub lr, lr, #(.L_ALT_op_iput_short - .L_op_iput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget
- sub lr, lr, #(.L_ALT_op_sget - .L_op_sget) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_wide
- sub lr, lr, #(.L_ALT_op_sget_wide - .L_op_sget_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_object
- sub lr, lr, #(.L_ALT_op_sget_object - .L_op_sget_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_boolean
- sub lr, lr, #(.L_ALT_op_sget_boolean - .L_op_sget_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_byte
- sub lr, lr, #(.L_ALT_op_sget_byte - .L_op_sget_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_char
- sub lr, lr, #(.L_ALT_op_sget_char - .L_op_sget_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sget_short
- sub lr, lr, #(.L_ALT_op_sget_short - .L_op_sget_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput
- sub lr, lr, #(.L_ALT_op_sput - .L_op_sput) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_wide
- sub lr, lr, #(.L_ALT_op_sput_wide - .L_op_sput_wide) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_object
- sub lr, lr, #(.L_ALT_op_sput_object - .L_op_sput_object) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_boolean
- sub lr, lr, #(.L_ALT_op_sput_boolean - .L_op_sput_boolean) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_byte
- sub lr, lr, #(.L_ALT_op_sput_byte - .L_op_sput_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_char
- sub lr, lr, #(.L_ALT_op_sput_char - .L_op_sput_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sput_short
- sub lr, lr, #(.L_ALT_op_sput_short - .L_op_sput_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual
- sub lr, lr, #(.L_ALT_op_invoke_virtual - .L_op_invoke_virtual) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_super
- sub lr, lr, #(.L_ALT_op_invoke_super - .L_op_invoke_super) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_direct
- sub lr, lr, #(.L_ALT_op_invoke_direct - .L_op_invoke_direct) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_static
- sub lr, lr, #(.L_ALT_op_invoke_static - .L_op_invoke_static) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_interface
- sub lr, lr, #(.L_ALT_op_invoke_interface - .L_op_invoke_interface) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_return_void_no_barrier
- sub lr, lr, #(.L_ALT_op_return_void_no_barrier - .L_op_return_void_no_barrier) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_range
- sub lr, lr, #(.L_ALT_op_invoke_virtual_range - .L_op_invoke_virtual_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_super_range
- sub lr, lr, #(.L_ALT_op_invoke_super_range - .L_op_invoke_super_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_direct_range
- sub lr, lr, #(.L_ALT_op_invoke_direct_range - .L_op_invoke_direct_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_static_range
- sub lr, lr, #(.L_ALT_op_invoke_static_range - .L_op_invoke_static_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_interface_range
- sub lr, lr, #(.L_ALT_op_invoke_interface_range - .L_op_invoke_interface_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_79
- sub lr, lr, #(.L_ALT_op_unused_79 - .L_op_unused_79) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_7a
- sub lr, lr, #(.L_ALT_op_unused_7a - .L_op_unused_7a) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_int
- sub lr, lr, #(.L_ALT_op_neg_int - .L_op_neg_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_not_int
- sub lr, lr, #(.L_ALT_op_not_int - .L_op_not_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_long
- sub lr, lr, #(.L_ALT_op_neg_long - .L_op_neg_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_not_long
- sub lr, lr, #(.L_ALT_op_not_long - .L_op_not_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_float
- sub lr, lr, #(.L_ALT_op_neg_float - .L_op_neg_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_neg_double
- sub lr, lr, #(.L_ALT_op_neg_double - .L_op_neg_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_long
- sub lr, lr, #(.L_ALT_op_int_to_long - .L_op_int_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_float
- sub lr, lr, #(.L_ALT_op_int_to_float - .L_op_int_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_double
- sub lr, lr, #(.L_ALT_op_int_to_double - .L_op_int_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_int
- sub lr, lr, #(.L_ALT_op_long_to_int - .L_op_long_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_float
- sub lr, lr, #(.L_ALT_op_long_to_float - .L_op_long_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_long_to_double
- sub lr, lr, #(.L_ALT_op_long_to_double - .L_op_long_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_int
- sub lr, lr, #(.L_ALT_op_float_to_int - .L_op_float_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_long
- sub lr, lr, #(.L_ALT_op_float_to_long - .L_op_float_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_float_to_double
- sub lr, lr, #(.L_ALT_op_float_to_double - .L_op_float_to_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_int
- sub lr, lr, #(.L_ALT_op_double_to_int - .L_op_double_to_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_long
- sub lr, lr, #(.L_ALT_op_double_to_long - .L_op_double_to_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_double_to_float
- sub lr, lr, #(.L_ALT_op_double_to_float - .L_op_double_to_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_byte
- sub lr, lr, #(.L_ALT_op_int_to_byte - .L_op_int_to_byte) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_char
- sub lr, lr, #(.L_ALT_op_int_to_char - .L_op_int_to_char) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_int_to_short
- sub lr, lr, #(.L_ALT_op_int_to_short - .L_op_int_to_short) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int
- sub lr, lr, #(.L_ALT_op_add_int - .L_op_add_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_int
- sub lr, lr, #(.L_ALT_op_sub_int - .L_op_sub_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int
- sub lr, lr, #(.L_ALT_op_mul_int - .L_op_mul_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int
- sub lr, lr, #(.L_ALT_op_div_int - .L_op_div_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int
- sub lr, lr, #(.L_ALT_op_rem_int - .L_op_rem_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int
- sub lr, lr, #(.L_ALT_op_and_int - .L_op_and_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int
- sub lr, lr, #(.L_ALT_op_or_int - .L_op_or_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int
- sub lr, lr, #(.L_ALT_op_xor_int - .L_op_xor_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int
- sub lr, lr, #(.L_ALT_op_shl_int - .L_op_shl_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int
- sub lr, lr, #(.L_ALT_op_shr_int - .L_op_shr_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int
- sub lr, lr, #(.L_ALT_op_ushr_int - .L_op_ushr_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_long
- sub lr, lr, #(.L_ALT_op_add_long - .L_op_add_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_long
- sub lr, lr, #(.L_ALT_op_sub_long - .L_op_sub_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_long
- sub lr, lr, #(.L_ALT_op_mul_long - .L_op_mul_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_long
- sub lr, lr, #(.L_ALT_op_div_long - .L_op_div_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_long
- sub lr, lr, #(.L_ALT_op_rem_long - .L_op_rem_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_long
- sub lr, lr, #(.L_ALT_op_and_long - .L_op_and_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_long
- sub lr, lr, #(.L_ALT_op_or_long - .L_op_or_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_long
- sub lr, lr, #(.L_ALT_op_xor_long - .L_op_xor_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_long
- sub lr, lr, #(.L_ALT_op_shl_long - .L_op_shl_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_long
- sub lr, lr, #(.L_ALT_op_shr_long - .L_op_shr_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_long
- sub lr, lr, #(.L_ALT_op_ushr_long - .L_op_ushr_long) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_float
- sub lr, lr, #(.L_ALT_op_add_float - .L_op_add_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_float
- sub lr, lr, #(.L_ALT_op_sub_float - .L_op_sub_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_float
- sub lr, lr, #(.L_ALT_op_mul_float - .L_op_mul_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_float
- sub lr, lr, #(.L_ALT_op_div_float - .L_op_div_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_float
- sub lr, lr, #(.L_ALT_op_rem_float - .L_op_rem_float) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_double
- sub lr, lr, #(.L_ALT_op_add_double - .L_op_add_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_double
- sub lr, lr, #(.L_ALT_op_sub_double - .L_op_sub_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_double
- sub lr, lr, #(.L_ALT_op_mul_double - .L_op_mul_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_double
- sub lr, lr, #(.L_ALT_op_div_double - .L_op_div_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_double
- sub lr, lr, #(.L_ALT_op_rem_double - .L_op_rem_double) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_2addr
- sub lr, lr, #(.L_ALT_op_add_int_2addr - .L_op_add_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_int_2addr
- sub lr, lr, #(.L_ALT_op_sub_int_2addr - .L_op_sub_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_2addr
- sub lr, lr, #(.L_ALT_op_mul_int_2addr - .L_op_mul_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_2addr
- sub lr, lr, #(.L_ALT_op_div_int_2addr - .L_op_div_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_2addr
- sub lr, lr, #(.L_ALT_op_rem_int_2addr - .L_op_rem_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_2addr
- sub lr, lr, #(.L_ALT_op_and_int_2addr - .L_op_and_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_2addr
- sub lr, lr, #(.L_ALT_op_or_int_2addr - .L_op_or_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_2addr
- sub lr, lr, #(.L_ALT_op_xor_int_2addr - .L_op_xor_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int_2addr
- sub lr, lr, #(.L_ALT_op_shl_int_2addr - .L_op_shl_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int_2addr
- sub lr, lr, #(.L_ALT_op_shr_int_2addr - .L_op_shr_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int_2addr
- sub lr, lr, #(.L_ALT_op_ushr_int_2addr - .L_op_ushr_int_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_long_2addr
- sub lr, lr, #(.L_ALT_op_add_long_2addr - .L_op_add_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_long_2addr
- sub lr, lr, #(.L_ALT_op_sub_long_2addr - .L_op_sub_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_long_2addr
- sub lr, lr, #(.L_ALT_op_mul_long_2addr - .L_op_mul_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_long_2addr
- sub lr, lr, #(.L_ALT_op_div_long_2addr - .L_op_div_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_long_2addr
- sub lr, lr, #(.L_ALT_op_rem_long_2addr - .L_op_rem_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_long_2addr
- sub lr, lr, #(.L_ALT_op_and_long_2addr - .L_op_and_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_long_2addr
- sub lr, lr, #(.L_ALT_op_or_long_2addr - .L_op_or_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_long_2addr
- sub lr, lr, #(.L_ALT_op_xor_long_2addr - .L_op_xor_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_long_2addr
- sub lr, lr, #(.L_ALT_op_shl_long_2addr - .L_op_shl_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_long_2addr
- sub lr, lr, #(.L_ALT_op_shr_long_2addr - .L_op_shr_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_long_2addr
- sub lr, lr, #(.L_ALT_op_ushr_long_2addr - .L_op_ushr_long_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_float_2addr
- sub lr, lr, #(.L_ALT_op_add_float_2addr - .L_op_add_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_float_2addr
- sub lr, lr, #(.L_ALT_op_sub_float_2addr - .L_op_sub_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_float_2addr
- sub lr, lr, #(.L_ALT_op_mul_float_2addr - .L_op_mul_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_float_2addr
- sub lr, lr, #(.L_ALT_op_div_float_2addr - .L_op_div_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_float_2addr
- sub lr, lr, #(.L_ALT_op_rem_float_2addr - .L_op_rem_float_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_double_2addr
- sub lr, lr, #(.L_ALT_op_add_double_2addr - .L_op_add_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_sub_double_2addr
- sub lr, lr, #(.L_ALT_op_sub_double_2addr - .L_op_sub_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_double_2addr
- sub lr, lr, #(.L_ALT_op_mul_double_2addr - .L_op_mul_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_double_2addr
- sub lr, lr, #(.L_ALT_op_div_double_2addr - .L_op_div_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_double_2addr
- sub lr, lr, #(.L_ALT_op_rem_double_2addr - .L_op_rem_double_2addr) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_lit16
- sub lr, lr, #(.L_ALT_op_add_int_lit16 - .L_op_add_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rsub_int
- sub lr, lr, #(.L_ALT_op_rsub_int - .L_op_rsub_int) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_lit16
- sub lr, lr, #(.L_ALT_op_mul_int_lit16 - .L_op_mul_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_lit16
- sub lr, lr, #(.L_ALT_op_div_int_lit16 - .L_op_div_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_lit16
- sub lr, lr, #(.L_ALT_op_rem_int_lit16 - .L_op_rem_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_lit16
- sub lr, lr, #(.L_ALT_op_and_int_lit16 - .L_op_and_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_lit16
- sub lr, lr, #(.L_ALT_op_or_int_lit16 - .L_op_or_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_lit16
- sub lr, lr, #(.L_ALT_op_xor_int_lit16 - .L_op_xor_int_lit16) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_add_int_lit8
- sub lr, lr, #(.L_ALT_op_add_int_lit8 - .L_op_add_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rsub_int_lit8
- sub lr, lr, #(.L_ALT_op_rsub_int_lit8 - .L_op_rsub_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_mul_int_lit8
- sub lr, lr, #(.L_ALT_op_mul_int_lit8 - .L_op_mul_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_div_int_lit8
- sub lr, lr, #(.L_ALT_op_div_int_lit8 - .L_op_div_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_rem_int_lit8
- sub lr, lr, #(.L_ALT_op_rem_int_lit8 - .L_op_rem_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_and_int_lit8
- sub lr, lr, #(.L_ALT_op_and_int_lit8 - .L_op_and_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_or_int_lit8
- sub lr, lr, #(.L_ALT_op_or_int_lit8 - .L_op_or_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_xor_int_lit8
- sub lr, lr, #(.L_ALT_op_xor_int_lit8 - .L_op_xor_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shl_int_lit8
- sub lr, lr, #(.L_ALT_op_shl_int_lit8 - .L_op_shl_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_shr_int_lit8
- sub lr, lr, #(.L_ALT_op_shr_int_lit8 - .L_op_shr_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_ushr_int_lit8
- sub lr, lr, #(.L_ALT_op_ushr_int_lit8 - .L_op_ushr_int_lit8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_quick
- sub lr, lr, #(.L_ALT_op_iget_quick - .L_op_iget_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_wide_quick
- sub lr, lr, #(.L_ALT_op_iget_wide_quick - .L_op_iget_wide_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_object_quick
- sub lr, lr, #(.L_ALT_op_iget_object_quick - .L_op_iget_object_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_quick
- sub lr, lr, #(.L_ALT_op_iput_quick - .L_op_iput_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_wide_quick
- sub lr, lr, #(.L_ALT_op_iput_wide_quick - .L_op_iput_wide_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_object_quick
- sub lr, lr, #(.L_ALT_op_iput_object_quick - .L_op_iput_object_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_quick
- sub lr, lr, #(.L_ALT_op_invoke_virtual_quick - .L_op_invoke_virtual_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_virtual_range_quick
- sub lr, lr, #(.L_ALT_op_invoke_virtual_range_quick - .L_op_invoke_virtual_range_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_boolean_quick
- sub lr, lr, #(.L_ALT_op_iput_boolean_quick - .L_op_iput_boolean_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_byte_quick
- sub lr, lr, #(.L_ALT_op_iput_byte_quick - .L_op_iput_byte_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_char_quick
- sub lr, lr, #(.L_ALT_op_iput_char_quick - .L_op_iput_char_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iput_short_quick
- sub lr, lr, #(.L_ALT_op_iput_short_quick - .L_op_iput_short_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_boolean_quick
- sub lr, lr, #(.L_ALT_op_iget_boolean_quick - .L_op_iget_boolean_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_byte_quick
- sub lr, lr, #(.L_ALT_op_iget_byte_quick - .L_op_iget_byte_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_char_quick
- sub lr, lr, #(.L_ALT_op_iget_char_quick - .L_op_iget_char_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_iget_short_quick
- sub lr, lr, #(.L_ALT_op_iget_short_quick - .L_op_iget_short_quick) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f3
- sub lr, lr, #(.L_ALT_op_unused_f3 - .L_op_unused_f3) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f4
- sub lr, lr, #(.L_ALT_op_unused_f4 - .L_op_unused_f4) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f5
- sub lr, lr, #(.L_ALT_op_unused_f5 - .L_op_unused_f5) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f6
- sub lr, lr, #(.L_ALT_op_unused_f6 - .L_op_unused_f6) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f7
- sub lr, lr, #(.L_ALT_op_unused_f7 - .L_op_unused_f7) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f8
- sub lr, lr, #(.L_ALT_op_unused_f8 - .L_op_unused_f8) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_unused_f9
- sub lr, lr, #(.L_ALT_op_unused_f9 - .L_op_unused_f9) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_polymorphic
- sub lr, lr, #(.L_ALT_op_invoke_polymorphic - .L_op_invoke_polymorphic) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_polymorphic_range
- sub lr, lr, #(.L_ALT_op_invoke_polymorphic_range - .L_op_invoke_polymorphic_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_custom
- sub lr, lr, #(.L_ALT_op_invoke_custom - .L_op_invoke_custom) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_invoke_custom_range
- sub lr, lr, #(.L_ALT_op_invoke_custom_range - .L_op_invoke_custom_range) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_method_handle
- sub lr, lr, #(.L_ALT_op_const_method_handle - .L_op_const_method_handle) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: arm/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
- adr lr, .L_ALT_op_const_method_type
- sub lr, lr, #(.L_ALT_op_const_method_type - .L_op_const_method_type) @ Addr of primary handler.
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rPC
- b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.
-
- .balign 128
-/* File: arm/instruction_end_alt.S */
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: arm/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
- cmp r0, #0 @ Exception pending?
- beq MterpFallback @ If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException @ (self, shadow_frame)
- cmp r0, #0
- beq MterpExceptionReturn @ no local catch, back to caller.
- ldr r0, [rFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr r1, [rFP, #OFF_FP_DEX_PC]
- ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add rPC, r0, r1, lsl #1 @ generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cmp r0, #0
- bne MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
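The comment above enumerates several profiling, OSR and suspend-check cases; the decision logic is easier to follow in C than in the flag-driven assembly below. The following is a rough, hedged model of that logic, not ART's actual implementation: JIT_CHECK_OSR == -1 is taken from this file, but the helper names and the main() driver are invented stubs for illustration.

/* Rough C model of the taken-branch handling below; helper names are stubs. */
#include <stdbool.h>
#include <stdio.h>

#define JIT_CHECK_OSR (-1)

static bool try_osr(int offset)        { (void)offset; return false; }  /* stub for MterpMaybeDoOnStackReplacement */
static int  report_hotness_batch(void) { return 100; }                  /* stub for MterpAddHotnessBatch: returns new countdown */
static void do_suspend_check(void)     { }                              /* stub for MterpSuspendCheck */

/* Returns true when mterp should leave the fast path (OSR entry taken). */
static bool on_taken_branch(int *countdown, bool suspend_requested, int offset) {
    if (offset > 0) {                                  /* forward branch: never counted */
        return *countdown == JIT_CHECK_OSR && try_osr(offset);
    }
    if (*countdown == JIT_CHECK_OSR) {                 /* .L_osr_check */
        if (try_osr(offset)) return true;
    } else if (*countdown > 0 && --*countdown == 0) {  /* hotness counted down to zero */
        *countdown = report_hotness_batch();           /* .L_add_batch */
        if (*countdown == JIT_CHECK_OSR && try_osr(offset)) return true;
    }
    if (suspend_requested) do_suspend_check();         /* .L_suspend_request_pending */
    return false;                                      /* resume at the branch target */
}

int main(void) {
    int countdown = 2;
    printf("%d\n", (int)on_taken_branch(&countdown, false, -4));  /* prints 0, countdown becomes 1 */
    return 0;
}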
-MterpCommonTakenBranchNoFlags:
- cmp rINST, #0
-MterpCommonTakenBranch:
- bgt .L_forward_branch @ don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmp rPROFILE, #JIT_CHECK_OSR
- beq .L_osr_check
- subsgt rPROFILE, #1
- beq .L_add_batch @ counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
- REFRESH_IBASE
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bne .L_suspend_request_pending
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov r0, rSELF
- bl MterpSuspendCheck @ (self)
- cmp r0, #0
- bne MterpFallback
- REFRESH_IBASE @ might have changed during suspend
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_no_count_backwards:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- bne .L_resume_backward_branch
-.L_osr_check:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
- beq .L_check_osr_forward
-.L_resume_forward_branch:
- add r2, rINST, rINST @ r2<- byte offset
- FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-.L_check_osr_forward:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add r1, rFP, #OFF_FP_SHADOWFRAME
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr r0, [rFP, #OFF_FP_METHOD]
- mov r2, rSELF
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
- cmp r0, #0
- bne MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip @ extract opcode from rINST
- GOTO_OPCODE ip @ jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rINST
- bl MterpLogOSR
-#endif
- mov r0, #1 @ Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov r0, rSELF
- add r1, rFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov r0, #0 @ signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov r0, #1 @ signal return to caller.
- b MterpDone
-MterpReturn:
- ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
- str r0, [r2]
- str r1, [r2, #4]
- mov r0, #1 @ signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp rPROFILE, #0
- bgt MterpProfileActive @ if > 0, we may have some counts to report.
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
-MterpProfileActive:
- mov rINST, r0 @ stash return value
- /* Report cached hotness counts */
- ldr r0, [rFP, #OFF_FP_METHOD]
- add r1, rFP, #OFF_FP_SHADOWFRAME
- mov r2, rSELF
- strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch @ (method, shadow_frame, self)
- mov r0, rINST @ restore return value
- ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
-
- END ExecuteMterpImpl
-
-
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
deleted file mode 100644
index 5f4aa4f..0000000
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ /dev/null
@@ -1,11791 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'arm64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: arm64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame).
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation).
-
- Some (hopefully) temporary ugliness. We'll treat xFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via xFP &
- number_of_vregs_.
-
- */
-
-/*
-ARM64 Runtime register usage conventions.
-
- r0 : w0 is 32-bit return register and x0 is 64-bit.
- r0-r7 : Argument registers.
- r8-r15 : Caller save registers (used as temporary registers).
- r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
- the linker, by the trampolines and other stubs (the backend uses
- these as temporary registers).
- r18 : Caller save register (used as temporary register).
- r19 : Pointer to thread-local storage.
- r20-r29: Callee save registers.
- r30 : (lr) is reserved (the link register).
- rsp : (sp) is reserved (the stack pointer).
- rzr : (zr) is reserved (the zero register).
-
- Floating-point registers
- v0-v31
-
- v0 : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
- This is analogous to the C/C++ (hard-float) calling convention.
- v0-v7 : Floating-point argument registers in both Dalvik and C/C++ conventions.
- Also used as temporary and codegen scratch registers.
-
- v0-v7 and v16-v31 : trashed across C calls.
- v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).
-
- v16-v31: Used as codegen temp/scratch.
- v8-v15 : Can be used for promotion.
-
- Must maintain 16-byte stack alignment.
-
-Mterp notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- x20 xPC interpreted program counter, used for fetching instructions
- x21 xFP interpreted frame pointer, used for accessing locals and args
- x22 xSELF self (Thread) pointer
- x23 xINST first 16-bit code unit of current instruction
- x24 xIBASE interpreted instruction base pointer, used for computed goto
- x25 xREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- x26 wPROFILE jit profile hotness countdown
- x16 ip scratch reg
- x17 ip2 scratch reg (used by macros)
-
-Macros are provided for common operations. They MUST NOT alter unspecified registers or condition
-codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/* During bringup, we'll use the shadow frame model instead of xFP */
-/* single-purpose registers, given names for clarity */
-#define xPC x20
-#define CFI_DEX 20 // DWARF register number of the register holding dex-pc (xPC).
-#define CFI_TMP 0 // DWARF register number of the first argument register (r0).
-#define xFP x21
-#define xSELF x22
-#define xINST x23
-#define wINST w23
-#define xIBASE x24
-#define xREFS x25
-#define wPROFILE w26
-#define xPROFILE x26
-#define ip x16
-#define ip2 x17
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
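A minimal C sketch of the idea behind the OFF_FP() definitions above: the frame pointer is parked at the vregs array, and the shadow-frame header fields are reached through negative offsets from it. The MockShadowFrame layout and field names are invented for illustration; the real offsets come from the asm_support.h constants used above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t number_of_vregs;   /* illustrative header fields */
    uint32_t dex_pc;
    uint32_t vregs[4];          /* the interpreter frame pointer (xFP) points here */
} MockShadowFrame;

/* Mirror of OFF_FP(): convert a struct offset into an offset relative to &vregs[0]. */
#define MOCK_OFF_FP(field) \
    ((ptrdiff_t)offsetof(MockShadowFrame, field) - (ptrdiff_t)offsetof(MockShadowFrame, vregs))

int main(void) {
    MockShadowFrame frame = { .number_of_vregs = 4, .dex_pc = 7 };
    uint32_t *fp = frame.vregs;                                          /* like xFP */
    uint32_t dex_pc = *(uint32_t *)((char *)fp + MOCK_OFF_FP(dex_pc));   /* negative offset */
    printf("dex_pc = %u\n", (unsigned)dex_pc);                           /* prints 7 */
    return 0;
}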
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- str xPC, [xFP, #OFF_FP_DEX_PC_PTR]
-.endm
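A short sketch of the pointer/index relationship EXPORT_PC relies on, assuming (as the FETCH macros below do) that dex code units are 16 bits wide: mterp stores a direct pointer into the code units, and the conventional dex pc can be recovered by pointer subtraction. Names below are illustrative only; the real conversion is performed by the runtime (GetDexPC), not by this file.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint16_t insns[8] = {0};                           /* stands in for the method's code units */
    const uint16_t *dex_pc_ptr = &insns[3];            /* what EXPORT_PC stores in the shadow frame */
    uint32_t dex_pc = (uint32_t)(dex_pc_ptr - insns);  /* offset in 16-bit code units */
    printf("dex_pc = %u\n", (unsigned)dex_pc);         /* prints 3 */
    return 0;
}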
-
-/*
- * Fetch the next instruction from xPC into wINST. Does not advance xPC.
- */
-.macro FETCH_INST
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances xPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * Because of the limited size of immediate constants on ARM, this is only
- * suitable for small forward movements (i.e. don't try to implement "goto"
- * with this).
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ldrh wINST, [xPC, #((\count)*2)]!
-.endm
-
-/*
- * The operation performed here is similar to FETCH_ADVANCE_INST, except the
- * src and dest registers are parameterized (not hard-wired to xPC and xINST).
- */
-.macro PREFETCH_ADVANCE_INST dreg, sreg, count
- ldrh \dreg, [\sreg, #((\count)*2)]!
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update xPC. Used to load
- * xINST ahead of possible exception point. Be sure to manually advance xPC
- * later.
- */
-.macro PREFETCH_INST count
- ldrh wINST, [xPC, #((\count)*2)]
-.endm
-
-/* Advance xPC by some number of code units. */
-.macro ADVANCE count
- add xPC, xPC, #((\count)*2)
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance
- * xPC to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- add xPC, xPC, \reg, sxtw
- ldrh wINST, [xPC]
-.endm
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance xPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-.macro FETCH reg, count
- ldrh \reg, [xPC, #((\count)*2)]
-.endm
-
-.macro FETCH_S reg, count
- ldrsh \reg, [xPC, #((\count)*2)]
-.endm
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-.macro FETCH_B reg, count, byte
- ldrb \reg, [xPC, #((\count)*2+(\byte))]
-.endm
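A small, self-contained C illustration of the addressing convention shared by the FETCH macros above: counts are given in 16-bit code units, and FETCH_B adds a 0/1 selector for the low or high byte of that halfword. The byte values are made-up test data.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint8_t code[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab};  /* little-endian code units */
    const uint8_t *pc = code;
    unsigned count = 2, byte = 1;
    /* FETCH reg, count       -> 16-bit load at pc + count*2 */
    uint16_t halfword = (uint16_t)(pc[count * 2] | (pc[count * 2 + 1] << 8));
    /* FETCH_B reg, count, b  -> 8-bit load at pc + count*2 + b */
    uint8_t b = pc[count * 2 + byte];
    printf("halfword=0x%04x byte=0x%02x\n", halfword, b);         /* 0xab89, 0xab */
    return 0;
}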
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, xINST, #255
-.endm
-
-/*
- * Put the prefetched instruction's opcode field into the specified register.
- */
-.macro GET_PREFETCHED_OPCODE oreg, ireg
- and \oreg, \ireg, #255
-.endm
-
-/*
- * Begin executing the opcode in _reg. Clobbers reg
- */
-
-.macro GOTO_OPCODE reg
- add \reg, xIBASE, \reg, lsl #7
- br \reg
-.endm
-.macro GOTO_OPCODE_BASE base,reg
- add \reg, \base, \reg, lsl #7
- br \reg
-.endm
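The two macros above implement the computed-goto dispatch: handlers are laid out 128 bytes apart (hence the .balign 128 directives throughout this file), so the target address is simply ibase + (opcode << 7). Below is a hedged C sketch of that arithmetic using a stand-in buffer rather than the real handler table.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    unsigned char fake_ibase[256 * 128];                  /* stand-in for the handler table */
    uint16_t inst = 0x2301;                               /* low byte is the opcode: 0x01 */
    unsigned opcode = inst & 0xff;                        /* GET_INST_OPCODE */
    unsigned char *handler = fake_ibase + (opcode << 7);  /* GOTO_OPCODE address computation */
    printf("opcode=0x%02x offset=%td\n", opcode, handler - fake_ibase);  /* 0x01, 128 */
    return 0;
}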
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-.macro GET_VREG reg, vreg
- ldr \reg, [xFP, \vreg, uxtw #2]
-.endm
-.macro SET_VREG reg, vreg
- str \reg, [xFP, \vreg, uxtw #2]
- str wzr, [xREFS, \vreg, uxtw #2]
-.endm
-.macro SET_VREG_OBJECT reg, vreg, tmpreg
- str \reg, [xFP, \vreg, uxtw #2]
- str \reg, [xREFS, \vreg, uxtw #2]
-.endm
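A short C sketch of the double-storage scheme behind SET_VREG and SET_VREG_OBJECT above: a plain value clears the parallel reference slot, while an object reference is written to both arrays so the GC can find it. The array names are invented; in the assembly they correspond to the areas addressed by xFP and xREFS.

#include <stdint.h>
#include <stdio.h>

#define NUM_VREGS 4
static uint32_t vregs[NUM_VREGS];   /* what xFP points at */
static uint32_t refs[NUM_VREGS];    /* what xREFS points at */

static void set_vreg(uint32_t v, unsigned i)          { vregs[i] = v;   refs[i] = 0;   }
static void set_vreg_object(uint32_t ref, unsigned i) { vregs[i] = ref; refs[i] = ref; }

int main(void) {
    set_vreg(42u, 0);             /* primitive: reference slot cleared */
    set_vreg_object(0x1000u, 1);  /* object: stored in both arrays */
    printf("%u %u / %u %u\n", (unsigned)vregs[0], (unsigned)refs[0],
                              (unsigned)vregs[1], (unsigned)refs[1]);  /* 42 0 / 4096 4096 */
    return 0;
}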
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
- */
-.macro GET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- ldr \reg, [ip2]
-.endm
-.macro SET_VREG_WIDE reg, vreg
- add ip2, xFP, \vreg, lsl #2
- str \reg, [ip2]
- add ip2, xREFS, \vreg, lsl #2
- str xzr, [ip2]
-.endm
-
-/*
- * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
- * Used to avoid an extra instruction in int-to-long.
- */
-.macro GET_VREG_S reg, vreg
- ldrsw \reg, [xFP, \vreg, uxtw #2]
-.endm
-
-/*
- * Convert a virtual register index into an address.
- */
-.macro VREG_INDEX_TO_ADDR reg, vreg
- add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-.endm
-
-/*
- * Save two registers to the stack.
- */
-.macro SAVE_TWO_REGS reg1, reg2, offset
- stp \reg1, \reg2, [sp, #(\offset)]
- .cfi_rel_offset \reg1, (\offset)
- .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-/*
- * Restore two registers from the stack.
- */
-.macro RESTORE_TWO_REGS reg1, reg2, offset
- ldp \reg1, \reg2, [sp, #(\offset)]
- .cfi_restore \reg1
- .cfi_restore \reg2
-.endm
-
-/*
- * Increase frame size and save two registers to the bottom of the stack.
- */
-.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
- stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
- .cfi_adjust_cfa_offset (\frame_adjustment)
- .cfi_rel_offset \reg1, 0
- .cfi_rel_offset \reg2, 8
-.endm
-
-/*
- * Restore two registers from the bottom of the stack and decrease frame size.
- */
-.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
- ldp \reg1, \reg2, [sp], #(\frame_adjustment)
- .cfi_restore \reg1
- .cfi_restore \reg2
- .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-/*
- * cfi support macros.
- */
-.macro ENTRY name
- .type \name, #function
- .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
-/* File: arm64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
- .text
-
-/*
- * Interpreter entry point.
- * On entry:
- *  x0  Thread* self
- * x1 insns_
- * x2 ShadowFrame
- * x3 JValue* result_register
- *
- */
-ENTRY ExecuteMterpImpl
- SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
- SAVE_TWO_REGS xIBASE, xREFS, 16
- SAVE_TWO_REGS xSELF, xINST, 32
- SAVE_TWO_REGS xPC, xFP, 48
- SAVE_TWO_REGS fp, lr, 64
- add fp, sp, #64
-
- /* Remember the return register */
- str x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
-
- /* Remember the dex instruction pointer */
- str x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
-
- /* set up "named" registers */
- mov xSELF, x0
- ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
- add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs.
- add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame
- ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc.
- add xPC, x1, w0, lsl #1 // Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
-
- /* Set up for backwards branches & osr profiling */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- bl MterpSetUpHotnessCountdown
- mov wPROFILE, w0 // Starting hotness countdown to xPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST // load wINST from rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
- /* NOTE: no fallthrough */
-
-/* File: arm64/instruction_start.S */
-
- .type artMterpAsmInstructionStart, #object
- .hidden artMterpAsmInstructionStart
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: arm64/op_nop.S */
- FETCH_ADVANCE_INST 1 // advance to next instr, load rINST
- GET_INST_OPCODE ip // ip<- opcode from rINST
- GOTO_OPCODE ip // execute it
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: arm64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: arm64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: arm64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: arm64/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lsr w3, wINST, #12 // w3<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: arm64/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 1 // w3<- BBBB
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x3, w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: arm64/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- FETCH w3, 2 // w3<- BBBB
- FETCH w2, 1 // w2<- AAAA
- GET_VREG_WIDE x3, w3
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- SET_VREG_WIDE x3, w2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: arm64/op_move_object.S */
-/* File: arm64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: arm64/op_move_object_from16.S */
-/* File: arm64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH w1, 1 // r1<- BBBB
- lsr w0, wINST, #8 // r0<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_VREG w2, w1 // r2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[AA]<- r2
- .else
- SET_VREG w2, w0 // fp[AA]<- r2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: arm64/op_move_object_16.S */
-/* File: arm64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH w1, 2 // w1<- BBBB
- FETCH w0, 1 // w0<- AAAA
- FETCH_ADVANCE_INST 3 // advance xPC, load xINST
- GET_VREG w2, w1 // w2<- fp[BBBB]
- GET_INST_OPCODE ip // extract opcode from xINST
- .if 1
- SET_VREG_OBJECT w2, w0 // fp[AAAA]<- w2
- .else
- SET_VREG w2, w0 // fp[AAAA]<- w2
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: arm64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 0
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: arm64/op_move_result_wide.S */
- /* for: move-result-wide */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr x0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, x2 // fp[AA]<- r0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: arm64/op_move_result_object.S */
-/* File: arm64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- lsr w2, wINST, #8 // r2<- AA
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- ldr x0, [xFP, #OFF_FP_RESULT_REGISTER] // get pointer to result JType.
- ldr w0, [x0] // r0 <- result.i.
- GET_INST_OPCODE ip // extract opcode from wINST
- .if 1
- SET_VREG_OBJECT w0, w2, w1 // fp[AA]<- r0
- .else
- SET_VREG w0, w2 // fp[AA]<- r0
- .endif
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: arm64/op_move_exception.S */
- /* move-exception vAA */
- lsr w2, wINST, #8 // w2<- AA
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- mov x1, #0 // w1<- 0
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- SET_VREG_OBJECT w3, w2 // fp[AA]<- exception obj
- GET_INST_OPCODE ip // extract opcode from rINST
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // clear exception
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: arm64/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_void_check
-.Lop_return_void_return:
- mov x0, #0
- b MterpReturn
-.Lop_return_void_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_void_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: arm64/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_check
-.Lop_return_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.Lop_return_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: arm64/op_return_wide.S */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_wide_check
-.Lop_return_wide_return:
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG_WIDE x0, w2 // x0<- vAA
- b MterpReturn
-.Lop_return_wide_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_wide_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: arm64/op_return_object.S */
-/* File: arm64/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- bl MterpThreadFenceForConstructor
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_object_check
-.Lop_return_object_return:
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w0, w2 // r0<- vAA
- b MterpReturn
-.Lop_return_object_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_object_return
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: arm64/op_const_4.S */
- /* const/4 vA, #+B */
- sbfx w1, wINST, #12, #4 // w1<- sssssssB
- ubfx w0, wINST, #8, #4 // w0<- A
- FETCH_ADVANCE_INST 1 // advance xPC, load wINST
- GET_INST_OPCODE ip // ip<- opcode from xINST
- SET_VREG w1, w0 // fp[A]<- w1
- GOTO_OPCODE ip // execute next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: arm64/op_const_16.S */
- /* const/16 vAA, #+BBBB */
- FETCH_S w0, 1 // w0<- ssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance xPC, load wINST
- SET_VREG w0, w3 // vAA<- w0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: arm64/op_const.S */
- /* const vAA, #+BBBBbbbb */
- lsr w3, wINST, #8 // w3<- AA
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w1, 2 // w1<- BBBB (high
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w3 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: arm64/op_const_high16.S */
- /* const/high16 vAA, #+BBBB0000 */
- FETCH w0, 1 // r0<- 0000BBBB (zero-extended)
- lsr w3, wINST, #8 // r3<- AA
- lsl w0, w0, #16 // r0<- BBBB0000
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- SET_VREG w0, w3 // vAA<- r0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: arm64/op_const_wide_16.S */
- /* const-wide/16 vAA, #+BBBB */
- FETCH_S x0, 1 // x0<- ssssssssssssBBBB (sign-extended)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: arm64/op_const_wide_32.S */
- /* const-wide/32 vAA, #+BBBBbbbb */
- FETCH w0, 1 // x0<- 000000000000bbbb (low)
- lsr w3, wINST, #8 // w3<- AA
- FETCH_S x2, 2 // x2<- ssssssssssssBBBB (high)
- FETCH_ADVANCE_INST 3 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr x0, x0, x2, lsl #16 // x0<- ssssssssBBBBbbbb
- SET_VREG_WIDE x0, w3
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: arm64/op_const_wide.S */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- FETCH w0, 1 // w0<- bbbb (low)
- FETCH w1, 2 // w1<- BBBB (low middle)
- FETCH w2, 3 // w2<- hhhh (high middle)
- FETCH w3, 4 // w3<- HHHH (high)
- lsr w4, wINST, #8 // r4<- AA
- FETCH_ADVANCE_INST 5 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- orr w0, w0, w1, lsl #16 // w0<- BBBBbbbb
- orr x0, x0, x2, lsl #32 // w0<- hhhhBBBBbbbb
- orr x0, x0, x3, lsl #48 // w0<- HHHHhhhhBBBBbbbb
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: arm64/op_const_wide_high16.S */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- FETCH w0, 1 // w0<- 0000BBBB (zero-extended)
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- lsl x0, x0, #48
- SET_VREG_WIDE x0, w1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: arm64/op_const_string.S */
-/* File: arm64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstString
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: arm64/op_const_string_jumbo.S */
- /* const/string vAA, String//BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- bbbb (low
- FETCH w2, 2 // w2<- BBBB (high
- lsr w1, wINST, #8 // w1<- AA
- orr w0, w0, w2, lsl #16 // w1<- BBBBbbbb
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstString // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 // advance rPC
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 3 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: arm64/op_const_class.S */
-/* File: arm64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstClass // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: arm64/op_monitor_enter.S */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
- mov x1, xSELF // w1<- self
- bl artLockObjectFromCode
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: arm64/op_monitor_exit.S */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // w2<- AA
- GET_VREG w0, w2 // w0<- vAA (object)
-    mov     x1, xSELF                  // x1<- self
- bl artUnlockObjectFromCode // w0<- success for unlock(self, obj)
- cbnz w0, MterpException
- FETCH_ADVANCE_INST 1 // before throw: advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: arm64/op_check_cast.S */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpCheckCast // (index, &obj, method, self)
- PREFETCH_INST 2
- cbnz w0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: arm64/op_instance_of.S */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- EXPORT_PC
- FETCH w0, 1 // w0<- CCCC
- lsr w1, wINST, #12 // w1<- B
- VREG_INDEX_TO_ADDR x1, w1 // w1<- &object
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- method
- mov x3, xSELF // w3<- self
- bl MterpInstanceOf // (index, &obj, method, self)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x1, MterpException
- ADVANCE 2 // advance rPC
- SET_VREG w0, w2 // vA<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: arm64/op_array_length.S */
- /*
- * Return the length of an array.
- */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w0, w1 // w0<- vB (object ref)
- cbz w0, common_errNullObject // yup, fail
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- array length
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w3, w2 // vB<- length
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: arm64/op_new_instance.S */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xSELF
- mov w2, wINST
- bl MterpNewInstance // (shadow_frame, self, inst_data)
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: arm64/op_new_array.S */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: arm64/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl MterpFilledNewArray
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: arm64/op_filled_new_array_range.S */
-/* File: arm64/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xSELF
- bl MterpFilledNewArrayRange
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: arm64/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x1, x0, x1, lsl #16 // x1<- ssssssssBBBBbbbb
- GET_VREG w0, w3 // w0<- vAA (array object)
- add x1, xPC, x1, lsl #1 // x1<- PC + ssssssssBBBBbbbb*2 (array data off.)
- bl MterpFillArrayData // (obj, payload)
- cbz w0, MterpPossibleException // exception?
- FETCH_ADVANCE_INST 3 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: arm64/op_throw.S */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- lsr w2, wINST, #8 // r2<- AA
- GET_VREG w1, w2 // r1<- vAA (exception object)
- cbz w1, common_errNullObject
- str x1, [xSELF, #THREAD_EXCEPTION_OFFSET] // thread->exception<- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: arm64/op_goto.S */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sbfx wINST, wINST, #8, #8 // wINST<- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: arm64/op_goto_16.S */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S wINST, 1 // wINST<- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: arm64/op_goto_32.S */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- FETCH w0, 1 // w0<- aaaa (lo)
- FETCH w1, 2 // w1<- AAAA (hi)
- orr wINST, w0, w1, lsl #16 // wINST<- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: arm64/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl MterpDoPackedSwitch // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: arm64/op_sparse_switch.S */
-/* File: arm64/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH w0, 1 // x0<- 000000000000bbbb (lo)
- FETCH_S x1, 2 // x1<- ssssssssssssBBBB (hi)
- lsr w3, wINST, #8 // w3<- AA
- orr x0, x0, x1, lsl #16 // x0<- ssssssssBBBBbbbb
- GET_VREG w1, w3 // w1<- vAA
- add x0, xPC, x0, lsl #1 // x0<- PC + ssssssssBBBBbbbb*2
- bl MterpDoSparseSwitch // w0<- code-unit branch offset
- sxtw xINST, w0
- b MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: arm64/op_cmpl_float.S */
-/* File: arm64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG s1, w2
- GET_VREG s2, w3
- fcmp s1, s2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: arm64/op_cmpg_float.S */
-/* File: arm64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG s1, w2
- GET_VREG s2, w3
- fcmp s1, s2
- cset w0, ne
- cneg w0, w0, cc
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: arm64/op_cmpl_double.S */
-/* File: arm64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE d1, w2
- GET_VREG_WIDE d2, w3
- fcmp d1, d2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: arm64/op_cmpg_double.S */
-/* File: arm64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- */
- /* op vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE d1, w2
- GET_VREG_WIDE d2, w3
- fcmp d1, d2
- cset w0, ne
- cneg w0, w0, cc
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w4 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: arm64/op_cmp_long.S */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG_WIDE x1, w2
- GET_VREG_WIDE x2, w3
- cmp x1, x2
- cset w0, ne
- cneg w0, w0, lt
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG w0, w4
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: arm64/op_if_eq.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.eq MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: arm64/op_if_ne.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.ne MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: arm64/op_if_lt.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.lt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: arm64/op_if_ge.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.ge MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: arm64/op_if_gt.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.gt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: arm64/op_if_le.S */
-/* File: arm64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- lsr w1, wINST, #12 // w1<- B
- ubfx w0, wINST, #8, #4 // w0<- A
- GET_VREG w3, w1 // w3<- vB
- GET_VREG w2, w0 // w2<- vA
- FETCH_S wINST, 1 // wINST<- branch offset, in code units
- cmp w2, w3 // compare (vA, vB)
- b.le MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: arm64/op_if_eqz.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
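-    /*
-     * The zero-compare handlers test the single operand directly (cbz/cbnz,
-     * or tbz/tbnz on the sign bit), so the ".if 0 ... .endif" cmp below is
-     * dead template code; it is only assembled for the gtz/lez variants,
-     * which need real condition flags.
-     */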
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- cbz w2, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: arm64/op_if_nez.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- cbnz w2, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: arm64/op_if_ltz.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- tbnz w2, #31, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: arm64/op_if_gez.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 0
- cmp w2, #0 // compare (vA, 0)
- .endif
- tbz w2, #31, MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: arm64/op_if_gtz.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 1
- cmp w2, #0 // compare (vA, 0)
- .endif
- b.gt MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: arm64/op_if_lez.S */
-/* File: arm64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- lsr w0, wINST, #8 // w0<- AA
- GET_VREG w2, w0 // w2<- vAA
-    FETCH_S wINST, 1                    // wINST<- branch offset, in code units
- .if 1
- cmp w2, #0 // compare (vA, 0)
- .endif
- b.le MterpCommonTakenBranchNoFlags
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: arm64/op_unused_3e.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: arm64/op_unused_3f.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: arm64/op_unused_40.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: arm64/op_unused_41.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: arm64/op_unused_42.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: arm64/op_unused_43.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: arm64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
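-    /*
-     * Shape of the checks below: a cbz null check on the array reference,
-     * then an unsigned "cmp index, length" + bcs, which rejects both
-     * negative indexes and indexes >= length with a single branch.  The
-     * element address is formed with "add x0, x0, w1, uxtw #shift", i.e.
-     * base plus the zero-extended index scaled by the element width.
-     */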
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #2 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldr w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: arm64/op_aget_wide.S */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
-    cbz     w0, common_errNullObject    // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- ldr x2, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] // x2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x2, w4
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: arm64/op_aget_object.S */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
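-    /*
-     * Unlike the primitive aget handlers, the object case calls out to
-     * artAGetObjectFromMterp and then checks self->exception, presumably so
-     * the runtime can apply the read barrier and the null/range checks in
-     * one place.
-     */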
- FETCH_B w2, 1, 0 // w2<- BB
- FETCH_B w3, 1, 1 // w3<- CC
- EXPORT_PC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- bl artAGetObjectFromMterp // (array, index)
- ldr x1, [xSELF, #THREAD_EXCEPTION_OFFSET]
-    lsr     w2, wINST, #8               // w2<- AA
- PREFETCH_INST 2
- cbnz w1, MterpException
- SET_VREG_OBJECT w0, w2
- ADVANCE 2
- GET_INST_OPCODE ip
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: arm64/op_aget_boolean.S */
-/* File: arm64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: arm64/op_aget_byte.S */
-/* File: arm64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrsb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: arm64/op_aget_char.S */
-/* File: arm64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: arm64/op_aget_short.S */
-/* File: arm64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz x0, common_errNullObject // bail if null array object.
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, uxtw #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- ldrsh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // w2<- vBB[vCC]
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w2, w9 // vAA<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: arm64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
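-    /*
-     * Same null-check and unsigned bounds-check pattern as op_aget above.
-     * FETCH_ADVANCE_INST and GET_INST_OPCODE are interleaved with the vAA
-     * load and the store, likely so the next opcode fetch can overlap the
-     * memory access.
-     */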
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #2 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- str w2, [x0, #MIRROR_INT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: arm64/op_aput_wide.S */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- and w2, w0, #255 // w2<- BB
- lsr w3, w0, #8 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #3 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- GET_VREG_WIDE x1, w4
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- str x1, [x0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: arm64/op_aput_object.S */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
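-    /*
-     * Object stores go through the MterpAputObject helper rather than a raw
-     * str, presumably because they need an assignability check against the
-     * array's component type plus GC write-barrier bookkeeping.
-     */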
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpAputObject
- cbz w0, MterpPossibleException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: arm64/op_aput_boolean.S */
-/* File: arm64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strb w2, [x0, #MIRROR_BOOLEAN_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: arm64/op_aput_byte.S */
-/* File: arm64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #0 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strb w2, [x0, #MIRROR_BYTE_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: arm64/op_aput_char.S */
-/* File: arm64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strh w2, [x0, #MIRROR_CHAR_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: arm64/op_aput_short.S */
-/* File: arm64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B w2, 1, 0 // w2<- BB
- lsr w9, wINST, #8 // w9<- AA
- FETCH_B w3, 1, 1 // w3<- CC
- GET_VREG w0, w2 // w0<- vBB (array object)
- GET_VREG w1, w3 // w1<- vCC (requested index)
- cbz w0, common_errNullObject // bail if null
- ldr w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // w3<- arrayObj->length
- add x0, x0, w1, lsl #1 // w0<- arrayObj + index*width
- cmp w1, w3 // compare unsigned index, length
- bcs common_errArrayIndex // index >= length, bail
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_VREG w2, w9 // w2<- vAA
- GET_INST_OPCODE ip // extract opcode from rINST
- strh w2, [x0, #MIRROR_SHORT_ARRAY_DATA_OFFSET] // vBB[vCC]<- w2
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
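-    /*
-     * All iget variants share this shape: pass the field ref (CCCC), the
-     * object from vB, the referring method and self to a MterpIGet* helper,
-     * then test self->exception before writing the result into vA.  The
-     * ".if" block selects SET_VREG_OBJECT only for iget-object, presumably
-     * so the stored value is tracked as a reference.
-     */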
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetU32
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 0
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: arm64/op_iget_wide.S */
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetU64
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cmp w3, #0
- cbnz w3, MterpException // bail out
- SET_VREG_WIDE x0, w2
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: arm64/op_iget_object.S */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetObj
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
-
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 1
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: arm64/op_iget_boolean.S */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetU8
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- uxtb w0, w0
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 0
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: arm64/op_iget_byte.S */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetI8
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- sxtb w0, w0
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 0
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: arm64/op_iget_char.S */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetU16
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- uxth w0, w0
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 0
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: arm64/op_iget_short.S */
-/* File: arm64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ldr x2, [xFP, #OFF_FP_METHOD] // w2<- referrer
- mov x3, xSELF // w3<- self
- bl MterpIGetI16
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- sxth w0, w0
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz x3, MterpPossibleException // bail out
- .if 0
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- .else
- SET_VREG w0, w2 // fp[A]<- w0
- .endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: arm64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
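-    /*
-     * The iput helpers return non-zero on failure, so the exception check
-     * here is a simple cbnz on w0, whereas the iget path above reloads
-     * self->exception instead.
-     */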
- .extern MterpIPutU32
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU32
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: arm64/op_iput_wide.S */
- /* iput-wide vA, vB, field//CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU64
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: arm64/op_iput_object.S */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- mov x3, xSELF
- bl MterpIPutObj
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: arm64/op_iput_boolean.S */
-/* File: arm64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutU8
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU8
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: arm64/op_iput_byte.S */
-/* File: arm64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutI8
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutI8
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: arm64/op_iput_char.S */
-/* File: arm64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutU16
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutU16
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: arm64/op_iput_short.S */
-/* File: arm64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutI16
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref CCCC
- lsr w1, wINST, #12 // w1<- B
- GET_VREG w1, w1 // w1<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w2, w2 // w2<- fp[A]
- ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer
- PREFETCH_INST 2
- bl MterpIPutI16
- cbnz w0, MterpPossibleException
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
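-    /*
-     * Static-field gets mirror the iget path minus the object argument: the
-     * MterpSGet* helper takes the field ref, the referring method and self,
-     * and the result is stored into vAA after checking self->exception.
-     */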
-
- .extern MterpSGetU32
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetU32
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
-
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 0
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: arm64/op_sget_wide.S */
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field//BBBB */
-
-    .extern MterpSGetU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetU64
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w4, wINST, #8 // w4<- AA
- cbnz x3, MterpException // bail out
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG_WIDE x0, w4
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: arm64/op_sget_object.S */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern MterpSGetObj
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetObj
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
-
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 1
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: arm64/op_sget_boolean.S */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern MterpSGetU8
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetU8
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
- uxtb w0, w0
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 0
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: arm64/op_sget_byte.S */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern MterpSGetI8
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetI8
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
- sxtb w0, w0
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 0
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: arm64/op_sget_char.S */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern MterpSGetU16
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetU16
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
- uxth w0, w0
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 0
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: arm64/op_sget_short.S */
-/* File: arm64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
-
- .extern MterpSGetI16
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- ldr x1, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpSGetI16
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- lsr w2, wINST, #8 // w2<- AA
- sxth w0, w0
- PREFETCH_INST 2
- cbnz x3, MterpException // bail out
-.if 0
- SET_VREG_OBJECT w0, w2 // fp[AA]<- w0
-.else
- SET_VREG w0, w2 // fp[AA]<- w0
-.endif
- ADVANCE 2
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: arm64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
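-    /*
-     * PREFETCH_INST runs before the helper call and ADVANCE only after the
-     * exception check, so a throw from the helper still sees this sput as
-     * the current instruction.
-     */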
- EXPORT_PC
-    FETCH w0, 1                         // w0<- field ref BBBB
-    lsr w3, wINST, #8                   // w3<- AA
-    GET_VREG w1, w3                     // w1<- fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutU32
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: arm64/op_sput_wide.S */
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field//BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- FETCH w0, 1 // w0<- field ref BBBB
- lsr w1, wINST, #8 // w1<- AA
- VREG_INDEX_TO_ADDR x1, w1
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutU64
- cbnz w0, MterpException // 0 on success, -1 on failure
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: arm64/op_sput_object.S */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov x2, xINST
- mov x3, xSELF
- bl MterpSPutObj
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: arm64/op_sput_boolean.S */
-/* File: arm64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- EXPORT_PC
-    FETCH w0, 1                         // w0<- field ref BBBB
-    lsr w3, wINST, #8                   // w3<- AA
-    GET_VREG w1, w3                     // w1<- fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutU8
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: arm64/op_sput_byte.S */
-/* File: arm64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- EXPORT_PC
-    FETCH w0, 1                         // w0<- field ref BBBB
-    lsr w3, wINST, #8                   // w3<- AA
-    GET_VREG w1, w3                     // w1<- fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutI8
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: arm64/op_sput_char.S */
-/* File: arm64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- EXPORT_PC
-    FETCH w0, 1                         // w0<- field ref BBBB
-    lsr w3, wINST, #8                   // w3<- AA
-    GET_VREG w1, w3                     // w1<- fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutU16
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: arm64/op_sput_short.S */
-/* File: arm64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- EXPORT_PC
-    FETCH w0, 1                         // w0<- field ref BBBB
-    lsr w3, wINST, #8                   // w3<- AA
-    GET_VREG w1, w3                     // w1<- fp[AA]
- ldr x2, [xFP, #OFF_FP_METHOD]
- mov x3, xSELF
- PREFETCH_INST 2 // Get next inst, but don't advance rPC
- bl MterpSPutI16
- cbnz w0, MterpException // 0 on success
- ADVANCE 2 // Past exception point - now advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: arm64/op_invoke_virtual.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
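-    /*
-     * Every invoke variant funnels into a MterpInvoke* C++ helper with
-     * (self, shadow_frame, pc, inst).  On success the handler advances past
-     * the 3-code-unit invoke and asks MterpShouldSwitchInterpreters whether
-     * to keep running in mterp or drop back to the reference interpreter.
-     */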
- .extern MterpInvokeVirtual
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtual
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: arm64/op_invoke_super.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeSuper
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: arm64/op_invoke_direct.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeDirect
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: arm64/op_invoke_static.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeStatic
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: arm64/op_invoke_interface.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeInterface
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: arm64/op_return_void_no_barrier.S */
- ldr w7, [xSELF, #THREAD_FLAGS_OFFSET]
- mov x0, xSELF
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .Lop_return_void_no_barrier_check
-.Lop_return_void_no_barrier_return:
- mov x0, #0
- b MterpReturn
-.Lop_return_void_no_barrier_check:
- bl MterpSuspendCheck // (self)
- b .Lop_return_void_no_barrier_return
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: arm64/op_invoke_virtual_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: arm64/op_invoke_super_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeSuperRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: arm64/op_invoke_direct_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeDirectRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: arm64/op_invoke_static_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeStaticRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: arm64/op_invoke_interface_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeInterfaceRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: arm64/op_unused_79.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: arm64/op_unused_7a.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: arm64/op_neg_int.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
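-    /*
-     * unop.S is a fill-in-the-blank template: the generated handlers differ
-     * only in the single "instr" line (here "sub w0, wzr, w0" for neg-int).
-     */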
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sub w0, wzr, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: arm64/op_not_int.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- mvn w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: arm64/op_neg_long.S */
-/* File: arm64/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- sub x0, xzr, x0
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: arm64/op_not_long.S */
-/* File: arm64/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- mvn x0, x0
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: arm64/op_neg_float.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- eor w0, w0, #0x80000000 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: arm64/op_neg_double.S */
-/* File: arm64/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op x0".
- *
- * For: neg-long, not-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- eor x0, x0, #0x8000000000000000
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4
- GOTO_OPCODE ip // jump to next instruction
- /* 10-11 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: arm64/op_int_to_long.S */
- /* int-to-long vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_S x0, w3 // x0<- sign_extend(fp[B])
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // fp[A]<- x0
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: arm64/op_int_to_float.S */
-/* File: arm64/funopNarrow.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op w0".
- *
- * For: int-to-float, float-to-int
- * TODO: refactor all of the conversions - parameterize width and use same template.
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG w0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf s0, w0 // s0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- s0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: arm64/op_int_to_double.S */
-/* File: arm64/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op w0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG w0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf d0, w0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: arm64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: arm64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- lsr w1, wINST, #12 // x1<- B from 15:12
- ubfx w0, wINST, #8, #4 // x0<- A from 11:8
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- GET_VREG w2, w1 // x2<- fp[B]
- GET_INST_OPCODE ip // ip<- opcode from wINST
- .if 0
- SET_VREG_OBJECT w2, w0 // fp[A]<- x2
- .else
- SET_VREG w2, w0 // fp[A]<- x2
- .endif
- GOTO_OPCODE ip // execute next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: arm64/op_long_to_float.S */
-/* File: arm64/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op x0".
- *
- * For: long-to-float, double-to-int, double-to-float
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf s0, x0 // s0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- s0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: arm64/op_long_to_double.S */
-/* File: arm64/funopWide.S */
- /*
- * Generic 64bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op x0".
- *
- * For: long-to-double, double-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE x0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- scvtf d0, x0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: arm64/op_float_to_int.S */
-/* File: arm64/funopNarrow.S */
- /*
- * Generic 32bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "w0 = op s0".
- *
- * For: int-to-float, float-to-int
- * TODO: refactor all of the conversions - parameterize width and use same template.
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs w0, s0 // w0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w4 // vA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
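- /*
- * Note on the conversion above: fcvtzs rounds toward zero, saturates
- * out-of-range inputs to INT_MIN/INT_MAX, and converts NaN to 0, which
- * lines up with Java's float-to-int narrowing rules, so no explicit range
- * or NaN check is needed here. E.g. (int)Float.NaN == 0 and
- * (int)1e10f == Integer.MAX_VALUE.
- */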
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: arm64/op_float_to_long.S */
-/* File: arm64/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "x0 = op s0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs x0, s0 // x0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // vA<- x0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: arm64/op_float_to_double.S */
-/* File: arm64/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "d0 = op s0".
- *
- * For: int-to-double, float-to-double, float-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG s0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvt d0, s0 // d0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE d0, w4 // vA<- d0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: arm64/op_double_to_int.S */
-/* File: arm64/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "w0 = op d0".
- *
- * For: long-to-float, double-to-int, double-to-float
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs w0, d0 // w0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG w0, w4 // vA<- w0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: arm64/op_double_to_long.S */
-/* File: arm64/funopWide.S */
- /*
- * Generic 64bit-to-64bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "x0 = op d0".
- *
- * For: long-to-double, double-to-long
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvtzs x0, d0 // x0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG_WIDE x0, w4 // vA<- x0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: arm64/op_double_to_float.S */
-/* File: arm64/funopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating point unary operation. Provide an
- * "instr" line that specifies an instruction that performs "s0 = op d0".
- *
- * For: long-to-float, double-to-int, double-to-float
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w4, wINST, #8, #4 // w4<- A
- GET_VREG_WIDE d0, w3
- FETCH_ADVANCE_INST 1 // advance rPC, load wINST
- fcvt s0, d0 // s0<- op
- GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG s0, w4 // vA<- s0
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: arm64/op_int_to_byte.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sxtb w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: arm64/op_int_to_char.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- uxth w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: arm64/op_int_to_short.S */
-/* File: arm64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op w0".
- * This could be an ARM instruction or a function call.
- *
- * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
- * int-to-byte, int-to-char, int-to-short
- */
- /* unop vA, vB */
- lsr w3, wINST, #12 // w3<- B
- GET_VREG w0, w3 // w0<- vB
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sxth w0, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 8-9 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: arm64/op_add_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
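- /*
- * Worked example for the vAA, vBB, vCC decode above (illustrative values,
- * assuming the standard Dex 23x encoding: unit 0 is AA|op, unit 1 is CC|BB).
- * For "add-int v5, v10, v11" (op 0x90) the units are 0x0590 0x0b0a, so
- * FETCH w0, 1 gives w0 = 0x0b0a, then w9 = 0x05 (AA), w3 = 0x0b (CC) and
- * w2 = 0x0a (BB), and the result of vBB + vCC is written to vAA.
- */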
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: arm64/op_sub_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: arm64/op_mul_int.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: arm64/op_div_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 1
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
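- /*
- * Why the explicit cbz check above: arm64 sdiv does not trap on a zero
- * divisor (it simply produces 0), so the interpreter must branch to
- * common_errDivideByZero itself to report the error. The other special
- * case, INT_MIN / -1, needs no check because sdiv wraps it to INT_MIN,
- * which is exactly the result Java requires.
- */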
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: arm64/op_rem_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 1
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w2, w0, w1 // optional op; may set condition codes
- msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: arm64/op_and_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: arm64/op_or_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: arm64/op_xor_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: arm64/op_shl_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: arm64/op_shr_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: arm64/op_ushr_int.S */
-/* File: arm64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the ARM math lib
- * handles it correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
- * mul-float, div-float, rem-float
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w9, wINST, #8 // w9<- AA
- lsr w3, w0, #8 // w3<- CC
- and w2, w0, #255 // w2<- BB
- GET_VREG w1, w3 // w1<- vCC
- GET_VREG w0, w2 // w0<- vBB
- .if 0
- cbz w1, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: arm64/op_add_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- add x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: arm64/op_sub_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sub x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: arm64/op_mul_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- mul x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: arm64/op_div_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (x2). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 1
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sdiv x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: arm64/op_rem_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (x2). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 1
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv x3, x1, x2
- msub x0, x3, x2, x1 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: arm64/op_and_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- and x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: arm64/op_or_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- orr x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: arm64/op_xor_long.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x2, w2 // x2<- vCC
- GET_VREG_WIDE x1, w1 // x1<- vBB
- .if 0
- cbz x2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- eor x0, x1, x2 // x0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w4 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: arm64/op_shl_long.S */
-/* File: arm64/shiftWide.S */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- lsl x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
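- /*
- * Example of the shift-count handling above: both the Dex long shifts and
- * the arm64 register-shift instructions use only the low 6 bits of the
- * count, so "shl-long vAA, vBB, vCC" with vCC = 65 shifts by 1, and a
- * count of 64 leaves the value unchanged.
- */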
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: arm64/op_shr_long.S */
-/* File: arm64/shiftWide.S */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- asr x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: arm64/op_ushr_long.S */
-/* File: arm64/shiftWide.S */
- /*
- * 64-bit shift operation.
- *
- * For: shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w3, wINST, #8 // w3<- AA
- lsr w2, w0, #8 // w2<- CC
- GET_VREG w2, w2 // w2<- vCC (shift count)
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE x1, w1 // x1<- vBB
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- lsr x0, x1, x2 // Do the shift. Only low 6 bits of x2 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w3 // vAA<- x0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: arm64/op_add_float.S */
-/* File: arm64/fbinop.S */
- /*
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w1, w0, #8 // w1<- CC
- and w0, w0, #255 // w0<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fadd s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: arm64/op_sub_float.S */
-/* File: arm64/fbinop.S */
- /*
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w1, w0, #8 // w1<- CC
- and w0, w0, #255 // w0<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fsub s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: arm64/op_mul_float.S */
-/* File: arm64/fbinop.S */
- /*
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w1, w0, #8 // w1<- CC
- and w0, w0, #255 // w0<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fmul s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: arm64/op_div_float.S */
-/* File: arm64/fbinop.S */
- /*
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w1, w0, #8 // w1<- CC
- and w0, w0, #255 // w0<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- fdiv s0, s0, s1 // s0<- op
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: arm64/op_rem_float.S */
-/* EABI doesn't define a float remainder function, but libm does */
-/* File: arm64/fbinop.S */
- /*
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float
- * form: <op> s0, s0, s1
- */
- /* floatop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w1, w0, #8 // w1<- CC
- and w0, w0, #255 // w0<- BB
- GET_VREG s1, w1
- GET_VREG s0, w0
- bl fmodf // s0<- op
- lsr w1, wINST, #8 // w1<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
- GOTO_OPCODE ip // jump to next instruction
-
-
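- /*
- * Note on the call above: arm64 has no float-remainder instruction, and
- * Java's % on floating point follows C fmod semantics (truncating division,
- * result keeps the sign of the dividend) rather than IEEE remainder, so
- * fmodf gives the right answer, e.g. -5.5f % 2.0f == -1.5f.
- */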
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: arm64/op_add_double.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // d2<- vCC
- GET_VREG_WIDE d1, w1 // d1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fadd d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: arm64/op_sub_double.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // d2<- vCC
- GET_VREG_WIDE d1, w1 // d1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fsub d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: arm64/op_mul_double.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // d2<- vCC
- GET_VREG_WIDE d1, w1 // d1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fmul d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: arm64/op_div_double.S */
-/* File: arm64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = x1 op x2".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than x0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, add-double, sub-double, mul-double, div-double, rem-double
- */
- /* binop vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w4, wINST, #8 // w4<- AA
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d2, w2 // d2<- vCC
- GET_VREG_WIDE d1, w1 // d1<- vBB
- .if 0
- cbz d2, common_errDivideByZero // is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- fdiv d0, d1, d2 // d0<- op, w0-w4 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- d0
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: arm64/op_rem_double.S */
- /* rem vAA, vBB, vCC */
- FETCH w0, 1 // w0<- CCBB
- lsr w2, w0, #8 // w2<- CC
- and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d1, w2 // d1<- vCC
- GET_VREG_WIDE d0, w1 // d0<- vBB
- bl fmod
- lsr w4, wINST, #8 // w4<- AA
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w4 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 11-14 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: arm64/op_add_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
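- /*
- * Worked example for the /2addr decode above (illustrative values, assuming
- * the standard Dex 12x encoding: B in bits 15:12, A in bits 11:8, opcode in
- * the low byte). For "add-int/2addr v1, v2" (op 0xb0) the code unit is
- * 0x21b0, so w3 = 0x2 (B) and w9 = 0x1 (A); vA is both the first operand
- * and the destination, so the whole instruction fits in one 16-bit unit and
- * the PC advances by only 1.
- */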
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: arm64/op_sub_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: arm64/op_mul_int_2addr.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: arm64/op_div_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: arm64/op_rem_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sdiv w2, w0, w1 // optional op; may set condition codes
- msub w0, w2, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: arm64/op_and_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: arm64/op_or_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: arm64/op_xor_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: arm64/op_shl_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: arm64/op_shr_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: arm64/op_ushr_int_2addr.S */
-/* File: arm64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w1, w3 // w1<- vB
- GET_VREG w0, w9 // w0<- vA
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: arm64/op_add_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- add x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: arm64/op_sub_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- sub x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: arm64/op_mul_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- mul x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: arm64/op_div_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 1
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- sdiv x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: arm64/op_rem_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 1
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- sdiv x3, x0, x1
- msub x0, x3, x1, x0 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
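div-long/2addr and rem-long/2addr are the only wide /2addr variants generated with the divide-by-zero guard enabled (".if 1"), and the remainder is derived from the quotient with msub. A hedged C sketch of that arithmetic; the function name and exception handling are placeholders.

#include <stdint.h>

/* Sketch: rem-long/2addr computes vA - (vA / vB) * vB after the zero check
 * the assembly performs with "cbz x1, common_errDivideByZero". The
 * INT64_MIN / -1 overflow case is ignored here for brevity. */
static int64_t rem_long(int64_t vA, int64_t vB) {
    if (vB == 0) {
        return 0;                 /* placeholder: the interpreter throws instead */
    }
    int64_t q = vA / vB;          /* sdiv x3, x0, x1 */
    return vA - q * vB;           /* msub x0, x3, x1, x0 */
}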
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: arm64/op_and_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- and x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: arm64/op_or_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- orr x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: arm64/op_xor_long_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE x1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- .if 0
- cbz x1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- eor x0, x0, x1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: arm64/op_shl_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- lsl x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: arm64/op_shr_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- asr x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm64/op_ushr_long_2addr.S */
-/* File: arm64/shiftWide2addr.S */
- /*
- * Generic 64-bit shift operation.
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG w1, w1 // x1<- vB
- GET_VREG_WIDE x0, w2 // x0<- vA
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- lsr x0, x0, x1 // Do the shift. Only low 6 bits of x1 are used.
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE x0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
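As the handler comments note, the wide shift handlers use only the low six bits of the shift count, which matches Dalvik's shl-long/shr-long/ushr-long semantics. A small C sketch with illustrative names:

#include <stdint.h>

/* Sketch: 64-bit shifts take the count modulo 64, mirroring
 * "lsl/lsr x0, x0, x1". shr-long is the same pattern with an
 * arithmetic (signed) shift. */
static uint64_t shl_long(uint64_t value, uint32_t count) {
    return value << (count & 63);
}
static uint64_t ushr_long(uint64_t value, uint32_t count) {
    return value >> (count & 63);
}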
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: arm64/op_add_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fadd s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: arm64/op_sub_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fsub s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: arm64/op_mul_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fmul s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: arm64/op_div_float_2addr.S */
-/* File: arm64/fbinop2addr.S */
- /*
- * Generic 32-bit floating point "/2addr" binary operation. Provide
- * an "instr" line that specifies an instruction that performs
- * "s2 = s0 op s1".
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
- */
- /* binop/2addr vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- fdiv s2, s0, s1 // s2<- op
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: arm64/op_rem_float_2addr.S */
- /* rem vA, vB */
- lsr w3, wINST, #12 // w3<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG s1, w3
- GET_VREG s0, w9
- bl fmodf
- ubfx w9, wINST, #8, #4 // w9<- A
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w9
- GOTO_OPCODE ip // jump to next instruction
-
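rem-float/2addr is the one 32-bit float /2addr operation that cannot be a single instruction, so the handler calls into libm and reloads the A index afterwards (w9 is caller-saved across the call). Semantically it is just fmodf; a trivial sketch:

#include <math.h>

/* Sketch: vA <- fmodf(vA, vB), matching "bl fmodf" with s0 = vA, s1 = vB. */
static float rem_float(float vA, float vB) {
    return fmodf(vA, vB);
}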
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: arm64/op_add_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fadd d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: arm64/op_sub_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fsub d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: arm64/op_mul_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fmul d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: arm64/op_div_double_2addr.S */
-/* File: arm64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "x0 = x0 op x1".
- * This must not be a function call, as we keep w2 live across it.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr, add-double/2addr,
- * sub-double/2addr, mul-double/2addr, div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- .if 0
- cbz d1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
-
- fdiv d0, d0, d1 // result<- op
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: arm64/op_rem_double_2addr.S */
- /* rem vA, vB */
- lsr w1, wINST, #12 // w1<- B
- ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
- bl fmod
- ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
- FETCH_ADVANCE_INST 1 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG_WIDE d0, w2 // vAA<- result
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: arm64/op_add_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- add w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
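The binop/lit16 family reads a signed 16-bit literal from the second code unit (FETCH_S) and combines it with vB into vA. A C sketch of the add-int/lit16 decode; the `code`/`vregs` parameters are illustrative stand-ins for the Dex instruction stream and register file, not mterp structures.

#include <stdint.h>

/* Sketch: add-int/lit16, vA <- vB + sign_extend16(CCCC). */
static void add_int_lit16(uint32_t *vregs, const uint16_t *code) {
    uint16_t inst = code[0];
    int32_t  lit  = (int16_t)code[1];     /* FETCH_S w1, 1 (sign-extended) */
    uint32_t a    = (inst >> 8) & 0xf;    /* ubfx w9, wINST, #8, #4 */
    uint32_t b    = inst >> 12;           /* lsr  w2, wINST, #12 */
    vregs[a] = vregs[b] + (uint32_t)lit;  /* add w0, w0, w1 */
}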
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: arm64/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sub w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: arm64/op_mul_int_lit16.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: arm64/op_div_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: arm64/op_rem_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w3, w0, w1
- msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: arm64/op_and_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- and w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: arm64/op_or_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- orr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: arm64/op_xor_int_lit16.S */
-/* File: arm64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- FETCH_S w1, 1 // w1<- ssssCCCC (sign-extended)
- lsr w2, wINST, #12 // w2<- B
- ubfx w9, wINST, #8, #4 // w9<- A
- GET_VREG w0, w2 // w0<- vB
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- eor w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-13 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: arm64/op_add_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- add w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
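The binop/lit8 family packs vBB in the low byte and a signed 8-bit literal CC in the high byte of the second code unit. For the bitwise and add forms the handler folds the extraction into the ALU instruction (e.g. "add w0, w0, w3, asr #8"), which is why the separate "asr w1, w3, #8" line is omitted and only its comment remains. A C sketch of the decode with illustrative names:

#include <stdint.h>

/* Sketch: add-int/lit8, vAA <- vBB + sign_extend8(CC). */
static void add_int_lit8(uint32_t *vregs, const uint16_t *code) {
    uint16_t inst = code[0];
    int32_t  ccbb = (int16_t)code[1];       /* FETCH_S w3, 1 (ssssCCBB) */
    uint32_t aa   = inst >> 8;              /* lsr w9, wINST, #8 */
    uint32_t bb   = (uint32_t)ccbb & 0xff;  /* and w2, w3, #255 */
    int32_t  cc   = ccbb >> 8;              /* asr #8 sign-extends CC */
    vregs[aa] = vregs[bb] + (uint32_t)cc;
}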
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm64/op_rsub_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sub w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: arm64/op_mul_int_lit8.S */
-/* must be "mul w0, w1, w0" -- "w0, w0, w1" is illegal */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- mul w0, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: arm64/op_div_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- sdiv w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: arm64/op_rem_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- asr w1, w3, #8 // optional; typically w1<- ssssssCC (sign extended)
- .if 1
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- sdiv w3, w0, w1 // optional op; may set condition codes
- msub w0, w3, w1, w0 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: arm64/op_and_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- and w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: arm64/op_or_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- orr w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: arm64/op_xor_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- eor w0, w0, w3, asr #8 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: arm64/op_shl_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsl w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: arm64/op_shr_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- asr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm64/op_ushr_int_lit8.S */
-/* File: arm64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = w0 op w1".
- * This could be an ARM instruction or a function call. (If the result
- * comes back in a register other than w0, you can override "result".)
- *
- * You can override "extract" if the extraction of the literal value
- * from w3 to w1 is not the default "asr w1, w3, #8". The extraction
- * can be omitted completely if the shift is embedded in "instr".
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (w1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- FETCH_S w3, 1 // w3<- ssssCCBB (sign-extended for CC)
- lsr w9, wINST, #8 // w9<- AA
- and w2, w3, #255 // w2<- BB
- GET_VREG w0, w2 // w0<- vBB
- ubfx w1, w3, #8, #5 // optional; typically w1<- ssssssCC (sign extended)
- .if 0
- cbz w1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- // optional op; may set condition codes
- lsr w0, w0, w1 // w0<- op, w0-w3 changed
- GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG w0, w9 // vAA<- w0
- GOTO_OPCODE ip // jump to next instruction
- /* 10-12 instructions */
-
-
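The three shift/lit8 forms replace the default literal extraction with "ubfx w1, w3, #8, #5", so only the low five bits of the literal act as the shift count, as 32-bit shift semantics require. Sketch (illustrative names):

#include <stdint.h>

/* Sketch: ushr-int/lit8; the count is bits [12:8] of the ssssCCBB code unit. */
static uint32_t ushr_int_lit8(uint32_t vBB, uint16_t ccbb) {
    uint32_t count = (ccbb >> 8) & 0x1f;   /* ubfx w1, w3, #8, #5 */
    return vBB >> count;                   /* lsr w0, w0, w1 */
}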
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: arm64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldr w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
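The *-quick handlers rely on the field having been resolved ahead of time, so the instruction's CCCC operand is already a raw byte offset into the object; the handler only null-checks the receiver and loads at that offset. A hedged C sketch in which plain pointers stand in for the managed object layout:

#include <stdint.h>
#include <string.h>

/* Sketch: iget-quick, value <- *(obj + offset). The assembly branches to
 * common_errNullObject for a null receiver; here the caller must ensure
 * obj is non-null. */
static uint32_t iget_quick(const uint8_t *obj, uint32_t offset) {
    uint32_t value;
    memcpy(&value, obj + offset, sizeof(value));   /* ldr w0, [x3, x1] */
    return value;
}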
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: arm64/op_iget_wide_quick.S */
- /* iget-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w4, 1 // w4<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldr x0, [x3, x4] // x0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- SET_VREG_WIDE x0, w2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: arm64/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- EXPORT_PC
- GET_VREG w0, w2 // w0<- object we're operating on
- bl artIGetObjectFromMterp // (obj, offset)
- ldr x3, [xSELF, #THREAD_EXCEPTION_OFFSET]
- ubfx w2, wINST, #8, #4 // w2<- A
- PREFETCH_INST 2
- cbnz w3, MterpPossibleException // bail out
- SET_VREG_OBJECT w0, w2 // fp[A]<- w0
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: arm64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- str w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: arm64/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w3, 1 // w3<- field byte offset
- GET_VREG w2, w2 // w2<- fp[B], the object pointer
- ubfx w0, wINST, #8, #4 // w0<- A
- cbz w2, common_errNullObject // object was null
- GET_VREG_WIDE x0, w0 // x0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load wINST
- str x0, [x2, x3] // obj.field<- x0
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: arm64/op_iput_object_quick.S */
- EXPORT_PC
- add x0, xFP, #OFF_FP_SHADOWFRAME
- mov x1, xPC
- mov w2, wINST
- bl MterpIputObjectQuick
- cbz w0, MterpException
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm64/op_invoke_virtual_quick.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualQuick
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm64/op_invoke_virtual_range_quick.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeVirtualQuickRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
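Both quick invoke forms share the generic invoke wrapper: they hand the whole call off to a C++ helper (MterpInvokeVirtualQuick / MterpInvokeVirtualQuickRange), bail to the exception path if it reports failure, and then ask MterpShouldSwitchInterpreters whether to leave mterp. A control-flow sketch in C; the function pointers are hypothetical stand-ins, not the helpers' real signatures.

#include <stdbool.h>

/* Sketch of the wrapper's control flow only; argument marshalling
 * (xSELF, shadow frame, xPC, xINST) is omitted. */
static bool dispatch_invoke(bool (*invoke_helper)(void),
                            bool (*should_switch)(void)) {
    if (!invoke_helper()) {      /* cbz w0, MterpException */
        return false;            /* pending exception */
    }
    /* FETCH_ADVANCE_INST 3: invokes occupy three code units */
    if (should_switch()) {       /* cbnz w0, MterpFallback */
        return false;            /* switch to the reference interpreter */
    }
    return true;                 /* GOTO_OPCODE: continue with the next handler */
}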
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: arm64/op_iput_boolean_quick.S */
-/* File: arm64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strb w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: arm64/op_iput_byte_quick.S */
-/* File: arm64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strb w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: arm64/op_iput_char_quick.S */
-/* File: arm64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strh w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: arm64/op_iput_short_quick.S */
-/* File: arm64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- fp[B], the object pointer
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- GET_VREG w0, w2 // w0<- fp[A]
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
- strh w0, [x3, x1] // obj.field<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: arm64/op_iget_boolean_quick.S */
-/* File: arm64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrb w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: arm64/op_iget_byte_quick.S */
-/* File: arm64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrsb w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: arm64/op_iget_char_quick.S */
-/* File: arm64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrh w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: arm64/op_iget_short_quick.S */
-/* File: arm64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- lsr w2, wINST, #12 // w2<- B
- FETCH w1, 1 // w1<- field byte offset
- GET_VREG w3, w2 // w3<- object we're operating on
- ubfx w2, wINST, #8, #4 // w2<- A
- cbz w3, common_errNullObject // object was null
- ldrsh w0, [x3, x1] // w0<- obj.field
- FETCH_ADVANCE_INST 2 // advance rPC, load rINST
-
- SET_VREG w0, w2 // fp[A]<- w0
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: arm64/op_unused_f3.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: arm64/op_unused_f4.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: arm64/op_unused_f5.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: arm64/op_unused_f6.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: arm64/op_unused_f7.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: arm64/op_unused_f8.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: arm64/op_unused_f9.S */
-/* File: arm64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: arm64/op_invoke_polymorphic.S */
-/* File: arm64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokePolymorphic
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm64/op_invoke_polymorphic_range.S */
-/* File: arm64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokePolymorphicRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 4
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: arm64/op_invoke_custom.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeCustom
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: arm64/op_invoke_custom_range.S */
-/* File: arm64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- mov x3, xINST
- bl MterpInvokeCustomRange
- cbz w0, MterpException
- FETCH_ADVANCE_INST 3
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: arm64/op_const_method_handle.S */
-/* File: arm64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstMethodHandle // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: arm64/op_const_method_type.S */
-/* File: arm64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- FETCH w0, 1 // w0<- BBBB
- lsr w1, wINST, #8 // w1<- AA
- add x2, xFP, #OFF_FP_SHADOWFRAME
- mov x3, xSELF
- bl MterpConstMethodType // (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 // load rINST
- cbnz w0, MterpPossibleException // let reference interpreter deal with it.
- ADVANCE 2 // advance rPC
- GET_INST_OPCODE ip // extract opcode from rINST
- GOTO_OPCODE ip // jump to next instruction
-
-
- .balign 128
-/* File: arm64/instruction_end.S */
-
- .type artMterpAsmInstructionEnd, #object
- .hidden artMterpAsmInstructionEnd
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: arm64/instruction_start_sister.S */
-
- .type artMterpAsmSisterStart, #object
- .hidden artMterpAsmSisterStart
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
-
-/* File: arm64/instruction_end_sister.S */
-
- .type artMterpAsmSisterEnd, #object
- .hidden artMterpAsmSisterEnd
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: arm64/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNegativeArraySizeException
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNoSuchMethodException
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogExceptionThrownException
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- ldr x2, [xSELF, #THREAD_FLAGS_OFFSET]
- bl MterpLogSuspendFallback
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ldr x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
- cbz x0, MterpFallback // If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpHandleException // (self, shadow_frame)
- cbz w0, MterpExceptionReturn // no local catch, back to caller.
- ldr x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
- ldr w1, [xFP, #OFF_FP_DEX_PC]
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
- add xPC, x0, x1, lsl #1 // generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- bl MterpShouldSwitchInterpreters
- cbnz w0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE ip
- GOTO_OPCODE ip
- /* NOTE: no fallthrough */
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * wINST <= signed offset
- * wPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- cmp wINST, #0
- b.gt .L_forward_branch // don't add forward branches to hotness
- tbnz wPROFILE, #31, .L_no_count_backwards // go if negative
- subs wPROFILE, wPROFILE, #1 // countdown
- b.eq .L_add_batch // counted down to zero - report
-.L_resume_backward_branch:
- ldr lr, [xSELF, #THREAD_FLAGS_OFFSET]
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- REFRESH_IBASE
- ands lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne .L_suspend_request_pending
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback
- REFRESH_IBASE // might have changed during suspend
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_no_count_backwards:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.ne .L_resume_backward_branch
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- cmp wPROFILE, #JIT_CHECK_OSR // possible OSR re-entry?
- b.eq .L_check_osr_forward
-.L_resume_forward_branch:
- add w2, wINST, wINST // w2<- byte offset
- FETCH_ADVANCE_INST_RB w2 // update rPC, load wINST
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-.L_check_osr_forward:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xINST
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- add x1, xFP, #OFF_FP_SHADOWFRAME
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- ldr x0, [xFP, #OFF_FP_METHOD]
- mov x2, xSELF
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov wPROFILE, w0 // restore new hotness countdown to wPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, #2
- EXPORT_PC
- bl MterpMaybeDoOnStackReplacement // (self, shadow_frame, offset)
- cbnz x0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-
-/*
- * Check for suspend check request. Assumes wINST already loaded, xPC advanced and
- * still needs to get the opcode and branch to it, and flags are in lr.
- */
-MterpCheckSuspendAndContinue:
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh xIBASE
- ands w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- b.ne check1
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-check1:
- EXPORT_PC
- mov x0, xSELF
- bl MterpSuspendCheck // (self)
- cbnz x0, MterpFallback // Something in the environment changed, switch interpreters
- GET_INST_OPCODE ip // extract opcode from wINST
- GOTO_OPCODE ip // jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- sxtw x2, wINST
- bl MterpLogOSR
-#endif
- mov x0, #1 // Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- bl MterpLogFallback
-#endif
-MterpCommonFallback:
- mov x0, #0 // signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR. Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- * uint32_t* xFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- mov x0, #1 // signal return to caller.
- b MterpDone
-MterpReturn:
- ldr x2, [xFP, #OFF_FP_RESULT_REGISTER]
- str x0, [x2]
- mov x0, #1 // signal return to caller.
-MterpDone:
-/*
- * At this point, we expect wPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending wPROFILE and the cached hotness counter). wPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmp wPROFILE, #0
- bgt MterpProfileActive // if > 0, we may have some counts to report.
- .cfi_remember_state
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
- .cfi_restore_state // Reset unwind info so following code unwinds.
- .cfi_def_cfa_offset 80 // workaround for clang bug: 31975598
-
-MterpProfileActive:
- mov xINST, x0 // stash return value
- /* Report cached hotness counts */
- ldr x0, [xFP, #OFF_FP_METHOD]
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xSELF
- strh wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
- bl MterpAddHotnessBatch // (method, shadow_frame, self)
- mov x0, xINST // restore return value
- RESTORE_TWO_REGS fp, lr, 64
- RESTORE_TWO_REGS xPC, xFP, 48
- RESTORE_TWO_REGS xSELF, xINST, 32
- RESTORE_TWO_REGS xIBASE, xREFS, 16
- RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
- ret
-
-
-/* File: arm64/instruction_start_alt.S */
-
- .type artMterpAsmAltInstructionStart, #object
- .hidden artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (0 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (1 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (2 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (3 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (4 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (5 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (6 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (7 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (8 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (9 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (10 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (11 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (12 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (13 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (14 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (15 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (16 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (17 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (18 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (19 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (20 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (21 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (22 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (23 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (24 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (25 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (26 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (27 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (28 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (29 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (30 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (31 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (32 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (33 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (34 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (35 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (36 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (37 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (38 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (39 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (40 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (41 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (42 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (43 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (44 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (45 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (46 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (47 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (48 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (49 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (50 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (51 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (52 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (53 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (54 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (55 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (56 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (57 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (58 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (59 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (60 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (61 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (62 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (63 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (64 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (65 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (66 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (67 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (68 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (69 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (70 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (71 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (72 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (73 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (74 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (75 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (76 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (77 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (78 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (79 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (80 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (81 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (82 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (83 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (84 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (85 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (86 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (87 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (88 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (89 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (90 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (91 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (92 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (93 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (94 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (95 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (96 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (97 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (98 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (99 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (100 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (101 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (102 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (103 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (104 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (105 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (106 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (107 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (108 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (109 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (110 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (111 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (112 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (113 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (114 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (115 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (116 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (117 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (118 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (119 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (120 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (121 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (122 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (123 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (124 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (125 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (126 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (127 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (128 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (129 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (130 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (131 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (132 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (133 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (134 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (135 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (136 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (137 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (138 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (139 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (140 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (141 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (142 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (143 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (144 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (145 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (146 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (147 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (148 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (149 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (150 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (151 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (152 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (153 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (154 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (155 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (156 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (157 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (158 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (159 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (160 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (161 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (162 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (163 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (164 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (165 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (166 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (167 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (168 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (169 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (170 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (171 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (172 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (173 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (174 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (175 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (176 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (177 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (178 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (179 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (180 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (181 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (182 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (183 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (184 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (185 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (186 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (187 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (188 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (189 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (190 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (191 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (192 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (193 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (194 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (195 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (196 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (197 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (198 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (199 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (200 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (201 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (202 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (203 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (204 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (205 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (206 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (207 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (208 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (209 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (210 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (211 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (212 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (213 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (214 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (215 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (216 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (217 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (218 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (219 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (220 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (221 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (222 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (223 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (224 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (225 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (226 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (227 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (228 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (229 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (230 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (231 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (232 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (233 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (234 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (235 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (236 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (237 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (238 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (239 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (240 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (241 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (242 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (243 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (244 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (245 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (246 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (247 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (248 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (249 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (250 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (251 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (252 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (253 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (254 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: arm64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- ldr xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET] // refresh IBASE.
- adr lr, artMterpAsmInstructionStart + (255 * 128) // Addr of primary handler.
- mov x0, xSELF
- add x1, xFP, #OFF_FP_SHADOWFRAME
- mov x2, xPC
- b MterpCheckBefore // (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
- .balign 128
-/* File: arm64/instruction_end_alt.S */
-
- .type artMterpAsmAltInstructionEnd, #object
- .hidden artMterpAsmAltInstructionEnd
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: arm64/close_cfi.S */
-// Close out the cfi info. We're treating mterp as a single function.
-
-END ExecuteMterpImpl
-
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S
deleted file mode 100644
index fb7d52e..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips.S
+++ /dev/null
@@ -1,12865 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'mips'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: mips/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
- handle invoke; allows higher-level code to create the frame & shadow frame).
-
- Once that's working, support direct entry code & eliminate the shadow frame (and
- the excess locals allocation).
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2 /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64 /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6 /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rIBASE interpreted instruction base pointer, used for computed goto
- s4 rINST first 16-bit code unit of current instruction
- s5 rOBJ object pointer
- s6 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s7 rTEMP used as temp storage that can survive a function call
- s8 rPROFILE branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero $0 /* always zero */
-#define AT $at /* assembler temp */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define t0 $8 /* temp registers (not saved across subroutine calls) */
-#define t1 $9
-#define t2 $10
-#define t3 $11
-#define t4 $12
-#define t5 $13
-#define t6 $14
-#define t7 $15
-#define ta0 $12 /* alias */
-#define ta1 $13
-#define ta2 $14
-#define ta3 $15
-#define s0 $16 /* saved across subroutine calls (callee saved) */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* two more temp registers */
-#define t9 $25
-#define k0 $26 /* kernel temporary */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define s8 $30 /* one more callee saved */
-#define ra $31 /* return address */
-
-/* FP register definitions */
-#define fv0 $f0
-#define fv0f $f1
-#define fv1 $f2
-#define fv1f $f3
-#define fa0 $f12
-#define fa0f $f13
-#define fa1 $f14
-#define fa1f $f15
-#define ft0 $f4
-#define ft0f $f5
-#define ft1 $f6
-#define ft1f $f7
-#define ft2 $f8
-#define ft2f $f9
-#define ft3 $f10
-#define ft3f $f11
-#define ft4 $f16
-#define ft4f $f17
-#define ft5 $f18
-#define ft5f $f19
-#define fs0 $f20
-#define fs0f $f21
-#define fs1 $f22
-#define fs1f $f23
-#define fs2 $f24
-#define fs2f $f25
-#define fs3 $f26
-#define fs3f $f27
-#define fs4 $f28
-#define fs4f $f29
-#define fs5 $f30
-#define fs5f $f31
-
-#ifndef MIPS32REVGE6
-#define fcc0 $fcc0
-#define fcc1 $fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
- seb rd, rt
-#define SEH(rd, rt) \
- seh rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- ins rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
- sll rd, rt, 24; \
- sra rd, rd, 24
-#define SEH(rd, rt) \
- sll rd, rt, 16; \
- sra rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
- sll rt_hi, rt_hi, 16; \
- or rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mthc1 r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
- mtc1 r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
- jic rt, 0
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- lsa rd, rs, rt, sa; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#else
-#define JR(rt) \
- jalr zero, rt
-#define LSA(rd, rs, rt, sa) \
- .if sa; \
- .set push; \
- .set noat; \
- sll AT, rs, sa; \
- addu rd, AT, rt; \
- .set pop; \
- .else; \
- addu rd, rs, rt; \
- .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
- lw tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
- sw rPC, OFF_FP_DEX_PC_PTR(rFP); \
- subu tmp, rPC, tmp; \
- sra tmp, tmp, 1; \
- sw tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction. "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
- lhu rINST, ((_count)*2)(rPC); \
- addu rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd. Updates
- * rPC to point to the next instruction. "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
- addu rPC, rPC, rd; \
- lhu rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC. The
- * "_count" value is in 16-bit code units. Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC. Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
- sll rd, rd, 7; \
- addu rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
- GET_OPCODE_TARGET(rd); \
- JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
- .set noat; \
- EAS2(AT, rFP, rix); \
- l.s rd, (AT); \
- .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- s.s rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- sw zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw rd, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- sw rlo, 0(t8); \
- sw rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- s.s rd, 0(t8); \
- lsa t8, rix, rREFS, 2; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rd, 0(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- jalr zero, dst; \
- sw zero, 0(t8); \
- .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- lsa t8, rix, rFP, 2; \
- .set noat; \
- mfhc1 AT, rlo; \
- s.s rlo, 0(t8); \
- sw AT, 4(t8); \
- .set at; \
- lsa t8, rix, rREFS, 2; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rREFS, AT; \
- sw zero, 0(t8); \
- sw zero, 4(t8); \
- addu t8, rFP, AT; \
- mfhc1 AT, rlo; \
- sw AT, 4(t8); \
- .set at; \
- jalr zero, dst; \
- s.s rlo, 0(t8); \
- .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
- .set noreorder; \
- GET_OPCODE_TARGET(dst); \
- .set noat; \
- sll AT, rix, 2; \
- addu t8, rFP, AT; \
- s.s rlo, 0(t8); \
- s.s rhi, 4(t8); \
- addu t8, rREFS, AT; \
- .set at; \
- sw zero, 0(t8); \
- jalr zero, dst; \
- sw zero, 4(t8); \
- .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- lw rd, 0(AT); \
- .set at
-
-#define STORE_eas2(rd, rbase, roff) \
- .set noat; \
- EAS2(AT, rbase, roff); \
- sw rd, 0(AT); \
- .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
- sw rlo, off(rbase); \
- sw rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
- lw rlo, off(rbase); \
- lw rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- .set noat; \
- mfhc1 AT, rlo; \
- sw AT, (off+4)(rbase); \
- .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- .set noat; \
- lw AT, (off+4)(rbase); \
- mthc1 AT, rlo; \
- .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
- s.s rlo, off(rbase); \
- s.s rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
- l.s rlo, off(rbase); \
- l.s rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP 84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
- STACK_STORE(ra, 124); \
- STACK_STORE(s8, 120); \
- STACK_STORE(s0, 116); \
- STACK_STORE(s1, 112); \
- STACK_STORE(s2, 108); \
- STACK_STORE(s3, 104); \
- STACK_STORE(s4, 100); \
- STACK_STORE(s5, 96); \
- STACK_STORE(s6, 92); \
- STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
- STACK_LOAD(s7, 88); \
- STACK_LOAD(s6, 92); \
- STACK_LOAD(s5, 96); \
- STACK_LOAD(s4, 100); \
- STACK_LOAD(s3, 104); \
- STACK_LOAD(s2, 108); \
- STACK_LOAD(s1, 112); \
- STACK_LOAD(s0, 116); \
- STACK_LOAD(s8, 120); \
- STACK_LOAD(ra, 124); \
- DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH 0xC1E00000
-#define LONG_MIN_HIGH 0x80000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
-
-/* File: mips/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- .align 2
- .global ExecuteMterpImpl
- .ent ExecuteMterpImpl
- .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
- .cfi_startproc
- .set noreorder
- .cpload t9
- .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
- STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
- .cprestore STACK_OFFSET_GP
-
- /* Remember the return register */
- sw a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sw a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs.
- EAS2(rREFS, rFP, a0) # point to reference array in shadow frame
- lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc
- EAS1(rPC, a1, a0) # Create direct pointer to 1st dex opcode
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
- EXPORT_PC()
-
- /* Starting ibase */
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
- /* Set up for backwards branches & osr profiling */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- JAL(MterpSetUpHotnessCountdown) # (method, shadow_frame, self)
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST() # load rINST from rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
- /* NOTE: no fallthrough */
-
-/* File: mips/instruction_start.S */
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: mips/op_nop.S */
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: mips/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: mips/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH(a1, 1) # a1 <- BBBB
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: mips/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH(a1, 2) # a1 <- BBBB
- FETCH(a0, 1) # a0 <- AAAA
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: mips/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- GET_OPA4(a2) # a2 <- A(+)
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: mips/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 1) # a3 <- BBBB
- GET_OPA(a2) # a2 <- AA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: mips/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
- FETCH(a3, 2) # a3 <- BBBB
- FETCH(a2, 1) # a2 <- AAAA
- EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AAAA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: mips/op_move_object.S */
-/* File: mips/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: mips/op_move_object_from16.S */
-/* File: mips/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- FETCH(a1, 1) # a1 <- BBBB
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: mips/op_move_object_16.S */
-/* File: mips/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- FETCH(a1, 2) # a1 <- BBBB
- FETCH(a0, 1) # a0 <- AAAA
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[BBBB]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: mips/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- GET_OPA(a2) # a2 <- AA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
- .else
- SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: mips/op_move_result_wide.S */
- /* move-result-wide vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- LOAD64(a0, a1, a3) # a0/a1 <- retval.j
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[AA] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: mips/op_move_result_object.S */
-/* File: mips/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- GET_OPA(a2) # a2 <- AA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- lw a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(a0, a2, t0) # fp[AA] <- a0
- .else
- SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: mips/op_move_exception.S */
- /* move-exception vAA */
- GET_OPA(a2) # a2 <- AA
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF) # get exception obj
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- SET_VREG_OBJECT(a3, a2) # fp[AA] <- exception obj
- sw zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: mips/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: mips/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- GET_VREG(v0, a2) # v0 <- vAA
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: mips/op_return_wide.S */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- EAS2(a2, rFP, a2) # a2 <- &fp[AA]
- LOAD64(v0, v1, a2) # v0/v1 <- vAA/vAA+1
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: mips/op_return_object.S */
-/* File: mips/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- JAL(MterpThreadFenceForConstructor)
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- GET_OPA(a2) # a2 <- AA
- GET_VREG(v0, a2) # v0 <- vAA
- move v1, zero
- b MterpReturn
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: mips/op_const_4.S */
- /* const/4 vA, +B */
- sll a1, rINST, 16 # a1 <- Bxxx0000
- GET_OPA(a0) # a0 <- A+
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
- and a0, a0, 15
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: mips/op_const_16.S */
- /* const/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: mips/op_const.S */
- /* const vAA, +BBBBbbbb */
- GET_OPA(a3) # a3 <- AA
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: mips/op_const_high16.S */
- /* const/high16 vAA, +BBBB0000 */
- FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- sll a0, a0, 16 # a0 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: mips/op_const_wide_16.S */
- /* const-wide/16 vAA, +BBBB */
- FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
- GET_OPA(a3) # a3 <- AA
- sra a1, a0, 31 # a1 <- ssssssss
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: mips/op_const_wide_32.S */
- /* const-wide/32 vAA, +BBBBbbbb */
- FETCH(a0, 1) # a0 <- 0000bbbb (low)
- GET_OPA(a3) # a3 <- AA
- FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- sra a1, a0, 31 # a1 <- ssssssss
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: mips/op_const_wide.S */
- /* const-wide vAA, +HHHHhhhhBBBBbbbb */
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a1, 2) # a1 <- BBBB (low middle)
- FETCH(a2, 3) # a2 <- hhhh (high middle)
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb (low word)
- FETCH(a3, 4) # a3 <- HHHH (high)
- GET_OPA(t1) # t1 <- AA
- INSERT_HIGH_HALF(a2, a3) # a2 <- HHHHhhhh (high word)
- FETCH_ADVANCE_INST(5) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a2, t1, t0) # vAA/vAA+1 <- a0/a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: mips/op_const_wide_high16.S */
- /* const-wide/high16 vAA, +BBBB000000000000 */
- FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
- GET_OPA(a3) # a3 <- AA
- li a0, 0 # a0 <- 00000000
- sll a1, 16 # a1 <- BBBB0000
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a3, t0) # vAA/vAA+1 <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: mips/op_const_string.S */
-/* File: mips/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstString
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: mips/op_const_string_jumbo.S */
- /* const/string vAA, string@BBBBBBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- bbbb (low)
- FETCH(a2, 2) # a2 <- BBBB (high)
- GET_OPA(a1) # a1 <- AA
- INSERT_HIGH_HALF(a0, a2) # a0 <- BBBBbbbb
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstString) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(3) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(3) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: mips/op_const_class.S */
-/* File: mips/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstClass
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstClass) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: mips/op_monitor_enter.S */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artLockObjectFromCode) # v0 <- artLockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: mips/op_monitor_exit.S */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC()
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a0, a2) # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- JAL(artUnlockObjectFromCode) # v0 <- artUnlockObject(obj, self)
- bnez v0, MterpException
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: mips/op_check_cast.S */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- JAL(MterpCheckCast) # v0 <- CheckCast(index, &obj, method, self)
- PREFETCH_INST(2)
- bnez v0, MterpPossibleException
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: mips/op_instance_of.S */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- CCCC
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &object
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- GET_OPA4(rOBJ) # rOBJ <- A+
- JAL(MterpInstanceOf) # v0 <- Mterp(index, &obj, method, self)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: mips/op_array_length.S */
- /*
- * Return the length of an array.
- */
- /* array-length vA, vB */
- GET_OPB(a1) # a1 <- B
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a0, a1) # a0 <- vB (object ref)
- # is object null?
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- array length
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a3, a2, t0) # vA <- length
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: mips/op_new_instance.S */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- JAL(MterpNewInstance)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: mips/op_new_array.S */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpNewArray)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: mips/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
- move a1, rPC
- move a2, rSELF
- JAL(MterpFilledNewArray) # v0 <- helper(shadow_frame, pc, self)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: mips/op_filled_new_array_range.S */
-/* File: mips/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME # a0 <- shadow frame
- move a1, rPC
- move a2, rSELF
- JAL(MterpFilledNewArrayRange) # v0 <- helper(shadow_frame, pc, self)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: mips/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC()
- FETCH(a1, 1) # a1 <- bbbb (lo)
- FETCH(a0, 2) # a0 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a1, a0) # a1 <- BBBBbbbb
- GET_VREG(a0, a3) # a0 <- vAA (array object)
- EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
- JAL(MterpFillArrayData) # v0 <- Mterp(obj, payload)
- beqz v0, MterpPossibleException # has exception
- FETCH_ADVANCE_INST(3) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: mips/op_throw.S */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC() # exception handler can throw
- GET_OPA(a2) # a2 <- AA
- GET_VREG(a1, a2) # a1 <- vAA (exception object)
- # null object?
- beqz a1, common_errNullObject # yes, throw an NPE instead
- sw a1, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: mips/op_goto.S */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- sll a0, rINST, 16 # a0 <- AAxx0000
- sra rINST, a0, 24 # rINST <- ssssssAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: mips/op_goto_16.S */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- FETCH_S(rINST, 1) # rINST <- ssssAAAA (sign-extended)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: mips/op_goto_32.S */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- FETCH(rINST, 1) # rINST <- aaaa (lo)
- FETCH(a1, 2) # a1 <- AAAA (hi)
- INSERT_HIGH_HALF(rINST, a1) # rINST <- AAAAaaaa
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: mips/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_VREG(a1, a3) # a1 <- vAA
- EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
- JAL(MterpDoPackedSwitch) # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: mips/op_sparse_switch.S */
-/* File: mips/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- FETCH(a0, 1) # a0 <- bbbb (lo)
- FETCH(a1, 2) # a1 <- BBBB (hi)
- GET_OPA(a3) # a3 <- AA
- INSERT_HIGH_HALF(a0, a1) # a0 <- BBBBbbbb
- GET_VREG(a1, a3) # a1 <- vAA
- EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
- JAL(MterpDoSparseSwitch) # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: mips/op_cmpl_float.S */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * for: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8
- GET_VREG_F(ft0, a2)
- GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
- cmp.eq.s ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.s ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.s fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 0
- c.olt.s fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.s fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: mips/op_cmpg_float.S */
-/* File: mips/op_cmpl_float.S */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * for: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8
- GET_VREG_F(ft0, a2)
- GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
- cmp.eq.s ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.s ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.s fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 1
- c.olt.s fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.s fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: mips/op_cmpl_double.S */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # rOBJ <- BB
- srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
- EAS2(t0, rFP, t0) # t0 <- &fp[CC]
- LOAD64_F(ft0, ft0f, rOBJ)
- LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
- cmp.eq.d ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.d fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 0
- c.olt.d fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.d fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: mips/op_cmpg_double.S */
-/* File: mips/op_cmpl_double.S */
- /*
- * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
- * into the destination register based on the comparison results.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
-
- FETCH(a0, 1) # a0 <- CCBB
- and rOBJ, a0, 255 # rOBJ <- BB
- srl t0, a0, 8 # t0 <- CC
- EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
- EAS2(t0, rFP, t0) # t0 <- &fp[CC]
- LOAD64_F(ft0, ft0f, rOBJ)
- LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
- cmp.eq.d ft2, ft0, ft1
- li rTEMP, 0
- bc1nez ft2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.d ft2, ft0, ft1
- li rTEMP, -1
- bc1nez ft2, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d ft2, ft1, ft0
- li rTEMP, 1
- bc1nez ft2, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#else
- c.eq.d fcc0, ft0, ft1
- li rTEMP, 0
- bc1t fcc0, 1f # done if vBB == vCC (ordered)
- .if 1
- c.olt.d fcc0, ft0, ft1
- li rTEMP, -1
- bc1t fcc0, 1f # done if vBB < vCC (ordered)
- li rTEMP, 1 # vBB > vCC or unordered
- .else
- c.olt.d fcc0, ft1, ft0
- li rTEMP, 1
- bc1t fcc0, 1f # done if vBB > vCC (ordered)
- li rTEMP, -1 # vBB < vCC or unordered
- .endif
-#endif
-1:
- GET_OPA(rOBJ)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: mips/op_cmp_long.S */
- /*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * I think I can improve on the ARM code by the following observation
- * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
- *    sgt   t1,  x.hi, y.hi;        # (x.hi > y.hi) ? 1:0
- *    subu  v0,  t1, t0             # v0 <- -1:1:0 for [ < > = ]
- */
- /* cmp-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(a3, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- slt t0, a1, a3 # compare hi
- sgt t1, a1, a3
- subu v0, t1, t0 # v0 <- (-1, 1, 0)
- bnez v0, .Lop_cmp_long_finish
- # at this point x.hi==y.hi
- sltu t0, a0, a2 # compare lo
- sgtu t1, a0, a2
- subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
-
-.Lop_cmp_long_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: mips/op_if_eq.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- beq a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: mips/op_if_ne.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bne a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: mips/op_if_lt.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- blt a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: mips/op_if_ge.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bge a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: mips/op_if_gt.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- bgt a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: mips/op_if_le.S */
-/* File: mips/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- GET_OPA4(a0) # a0 <- A+
- GET_OPB(a1) # a1 <- B
- GET_VREG(a3, a1) # a3 <- vB
- GET_VREG(a0, a0) # a0 <- vA
- FETCH_S(rINST, 1) # rINST<- branch offset, in code units
- ble a0, a3, MterpCommonTakenBranchNoFlags # compare (vA, vB)
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: mips/op_if_eqz.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- beq a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: mips/op_if_nez.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bne a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: mips/op_if_ltz.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- blt a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: mips/op_if_gez.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bge a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: mips/op_if_gtz.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- bgt a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: mips/op_if_lez.S */
-/* File: mips/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform.
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- GET_OPA(a0) # a0 <- AA
- GET_VREG(a0, a0) # a0 <- vAA
- FETCH_S(rINST, 1) # rINST <- branch offset, in code units
- ble a0, zero, MterpCommonTakenBranchNoFlags
- li t0, JIT_CHECK_OSR # possible OSR re-entry?
- beq rPROFILE, t0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: mips/op_unused_3e.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: mips/op_unused_3f.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: mips/op_unused_40.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: mips/op_unused_41.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: mips/op_unused_42.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: mips/op_unused_43.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: mips/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: mips/op_aget_wide.S */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- * Arrays of long/double are 64-bit aligned.
- */
- /* aget-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a2, a3, rOBJ, t0) # vAA/vAA+1 <- a2/a3
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: mips/op_aget_object.S */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- JAL(artAGetObjectFromMterp) # v0 <- GetObj(array, index)
- lw a1, THREAD_EXCEPTION_OFFSET(rSELF)
- PREFETCH_INST(2) # load rINST
- bnez a1, MterpException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, rOBJ, t0) # vAA <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: mips/op_aget_boolean.S */
-/* File: mips/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: mips/op_aget_byte.S */
-/* File: mips/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: mips/op_aget_char.S */
-/* File: mips/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: mips/op_aget_short.S */
-/* File: mips/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
- * instructions. We use a pair of FETCH_Bs instead.
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- # a1 >= a3; compare unsigned index
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: mips/op_aput.S */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: mips/op_aput_wide.S */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- */
- /* aput-wide vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t0) # t0 <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
- EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
- # compare unsigned index, length
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) # vBB[vCC] <- a2/a3
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: mips/op_aput_object.S */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- *
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpAputObject)
- beqz v0, MterpPossibleException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: mips/op_aput_boolean.S */
-/* File: mips/op_aput.S */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: mips/op_aput_byte.S */
-/* File: mips/op_aput.S */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: mips/op_aput_char.S */
-/* File: mips/op_aput.S */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: mips/op_aput_short.S */
-/* File: mips/op_aput.S */
-
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- FETCH_B(a2, 1, 0) # a2 <- BB
- GET_OPA(rOBJ) # rOBJ <- AA
- FETCH_B(a3, 1, 1) # a3 <- CC
- GET_VREG(a0, a2) # a0 <- vBB (array object)
- GET_VREG(a1, a3) # a1 <- vCC (requested index)
- # null array object?
- beqz a0, common_errNullObject # yes, bail
- LOAD_base_offMirrorArray_length(a3, a0) # a3 <- arrayObj->length
- EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # index >= length, bail
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_VREG(a2, rOBJ) # a2 <- vAA
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- JR(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetU32)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: mips/op_iget_wide.S */
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetU64)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a2, t0) # fp[A] <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: mips/op_iget_object.S */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetObj)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: mips/op_iget_boolean.S */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetU8)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: mips/op_iget_byte.S */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetI8)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: mips/op_iget_char.S */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetU16)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: mips/op_iget_short.S */
-/* File: mips/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- JAL(MterpIGetI16)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
- .else
- SET_VREG_GOTO(v0, a2, t0) # fp[A] <- v0
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: mips/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU32
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutU32)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: mips/op_iput_wide.S */
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- EAS2(a2, rFP, a2) # a2 <- &fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutU64)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: mips/op_iput_object.S */
- /*
- * 32-bit instance field put.
- *
- * for: iput-object, iput-object-volatile
- */
- /* op vA, vB, field@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpIPutObj)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: mips/op_iput_boolean.S */
-/* File: mips/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU8
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutU8)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: mips/op_iput_byte.S */
-/* File: mips/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI8
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutI8)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: mips/op_iput_char.S */
-/* File: mips/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU16
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutU16)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: mips/op_iput_short.S */
-/* File: mips/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI16
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPB(a1) # a1 <- B
- GET_VREG(a1, a1) # a1 <- fp[B], the object pointer
- GET_OPA4(a2) # a2 <- A+
- GET_VREG(a2, a2) # a2 <- fp[A]
- lw a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST(2) # load rINST
- JAL(MterpIPutI16)
- bnez v0, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU32
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetU32)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: mips/op_sget_wide.S */
- /*
- * 64-bit SGET handler.
- */
- /* sget-wide vAA, field@BBBB */
- .extern MterpSGetU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetU64)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- bnez a3, MterpException
- GET_OPA(a1) # a1 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a1, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: mips/op_sget_object.S */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetObj
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetObj)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 1
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: mips/op_sget_boolean.S */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU8
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetU8)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: mips/op_sget_byte.S */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI8
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetI8)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: mips/op_sget_char.S */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU16
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetU16)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: mips/op_sget_short.S */
-/* File: mips/op_sget.S */
- /*
- * General SGET handler.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI16
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- lw a1, OFF_FP_METHOD(rFP) # a1 <- method
- move a2, rSELF # a2 <- self
- JAL(MterpSGetI16)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA(a2) # a2 <- AA
- PREFETCH_INST(2)
- bnez a3, MterpException # bail out
- ADVANCE(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
-.if 0
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[AA] <- v0
-.else
- SET_VREG_GOTO(v0, a2, t0) # fp[AA] <- v0
-.endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: mips/op_sput.S */
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutU32)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: mips/op_sput_wide.S */
- /*
- * 64-bit SPUT handler.
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref CCCC
- GET_OPA(a1) # a1 <- AA
- EAS2(a1, rFP, a1) # a1 <- &fp[AA]
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutU64)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: mips/op_sput_object.S */
- /*
- * General 32-bit SPUT handler.
- *
- * for: sput-object,
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- JAL(MterpSPutObj)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: mips/op_sput_boolean.S */
-/* File: mips/op_sput.S */
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutU8)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: mips/op_sput_byte.S */
-/* File: mips/op_sput.S */
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutI8)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: mips/op_sput_char.S */
-/* File: mips/op_sput.S */
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutU16)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: mips/op_sput_short.S */
-/* File: mips/op_sput.S */
- /*
- * General SPUT handler.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- field ref BBBB
- GET_OPA(a3) # a3 <- AA
- GET_VREG(a1, a3) # a1 <- fp[AA], the object pointer
- lw a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- PREFETCH_INST(2) # load rINST
- JAL(MterpSPutI16)
- bnez v0, MterpException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: mips/op_invoke_virtual.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtual)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: mips/op_invoke_super.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeSuper)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: mips/op_invoke_direct.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeDirect)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: mips/op_invoke_static.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeStatic)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: mips/op_invoke_interface.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeInterface)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: mips/op_return_void_no_barrier.S */
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqz ra, 1f
- JAL(MterpSuspendCheck) # (self)
-1:
- move v0, zero
- move v1, zero
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: mips/op_invoke_virtual_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: mips/op_invoke_super_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeSuperRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: mips/op_invoke_direct_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeDirectRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: mips/op_invoke_static_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeStaticRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: mips/op_invoke_interface_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeInterfaceRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: mips/op_unused_79.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: mips/op_unused_7a.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: mips/op_neg_int.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- negu a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: mips/op_not_int.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- not a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: mips/op_neg_long.S */
-/* File: mips/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- negu v0, a0 # optional op
- negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: mips/op_not_long.S */
-/* File: mips/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- not a0, a0 # optional op
- not a1, a1 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: mips/op_neg_float.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- addu a0, a0, 0x80000000 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: mips/op_neg_double.S */
-/* File: mips/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0/result1 = op a0/a1".
- * This could be MIPS instruction or a function call.
- *
- * For: neg-long, not-long, neg-double,
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64(a0, a1, a3) # a0/a1 <- vA
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- addu a1, a1, 0x80000000 # a0/a1 <- op, a2-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: mips/op_int_to_long.S */
-/* File: mips/unopWider.S */
- /*
- * Generic 32bit-to-64bit unary operation. Provide an "instr" line
- * that specifies an instruction that performs "result0/result1 = op a0".
- *
- * For: int-to-long
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- sra a1, a0, 31 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: mips/op_int_to_float.S */
-/* File: mips/funop.S */
- /*
- * Generic 32-bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.w fv0, fa0
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t1) # vA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: mips/op_int_to_double.S */
-/* File: mips/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.w fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: mips/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: mips/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- GET_OPB(a1) # a1 <- B from 15:12
- GET_OPA4(a0) # a0 <- A from 11:8
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_VREG(a2, a1) # a2 <- fp[B]
- GET_INST_OPCODE(t0) # t0 <- opcode from rINST
- .if 0
- SET_VREG_OBJECT_GOTO(a2, a0, t0) # fp[A] <- a2
- .else
- SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
- .endif
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: mips/op_long_to_float.S */
- /*
- * long-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdisf)
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: mips/op_long_to_double.S */
- /*
- * long-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
- LOAD64_F(fv0, fv0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.l fv0, fv0
-#else
- LOAD64(rARG0, rARG1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
-#endif
-
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: mips/op_float_to_int.S */
- /*
- * float-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.s fcc0, fa0, fa0
- mtc1 zero, fa0
- movt.s fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
- trunc.w.s fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: mips/op_float_to_long.S */
- /*
- * float-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.s fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.s fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .Lop_float_to_long_get_opcode
-
- li t0, LONG_MIN_AS_FLOAT
- mtc1 t0, fa1
- c.ole.s fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .Lop_float_to_long_get_opcode
-
- neg.s fa1, fa1
- c.ole.s fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .Lop_float_to_long_get_opcode
-
- JAL(__fixsfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .Lop_float_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: mips/op_float_to_double.S */
-/* File: mips/funopWider.S */
- /*
- * Generic 32bit-to-64bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: int-to-double, float-to-double
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.d.s fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: mips/op_double_to_int.S */
- /*
- * double-to-int
- *
- * We have to clip values to int min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-#ifndef MIPS32REVGE6
- li t0, INT_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa1, fa0
-#endif
- GET_INST_OPCODE(t1) # extract opcode from rINST
-#ifndef MIPS32REVGE6
- bc1t fcc0, 1f # if INT_MIN <= vB, proceed to truncation
- c.eq.d fcc0, fa0, fa0
- mtc1 zero, fa0
- MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
- movt.d fa0, fa1, fcc0 # fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
- trunc.w.d fa0, fa0
- SET_VREG_F_GOTO(fa0, rOBJ, t1) # vA <- result
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: mips/op_double_to_long.S */
- /*
- * double-to-long
- *
- * We have to clip values to long min/max per the specification. The
- * expected common case is a "reasonable" value that converts directly
- * to modest integer. The EABI convert function isn't doing this for us
- * for pre-R6.
- */
- /* unop vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
- GET_INST_OPCODE(t1) # extract opcode from rINST
- trunc.l.d fa0, fa0
- SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) # vA <- result
-#else
- c.eq.d fcc0, fa0, fa0
- li rRESULT0, 0
- li rRESULT1, 0
- bc1f fcc0, .Lop_double_to_long_get_opcode
-
- li t0, LONG_MIN_AS_DOUBLE_HIGH
- mtc1 zero, fa1
- MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
- c.ole.d fcc0, fa0, fa1
- li rRESULT1, LONG_MIN_HIGH
- bc1t fcc0, .Lop_double_to_long_get_opcode
-
- neg.d fa1, fa1
- c.ole.d fcc0, fa1, fa0
- nor rRESULT0, rRESULT0, zero
- nor rRESULT1, rRESULT1, zero
- bc1t fcc0, .Lop_double_to_long_get_opcode
-
- JAL(__fixdfdi)
- GET_INST_OPCODE(t1) # extract opcode from rINST
- b .Lop_double_to_long_set_vreg
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: mips/op_double_to_float.S */
-/* File: mips/unopNarrower.S */
- /*
- * Generic 64bit-to-32bit floating-point unary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = op fa0".
- *
- * For: double-to-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- EAS2(a3, rFP, a3) # a3 <- &fp[B]
- LOAD64_F(fa0, fa0f, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- cvt.s.d fv0, fa0
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: mips/op_int_to_byte.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- SEB(a0, a0) # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: mips/op_int_to_char.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- and a0, 0xffff # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: mips/op_int_to_short.S */
-/* File: mips/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result0 = op a0".
- * This could be a MIPS instruction or a function call.
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * neg-int, not-int, neg-float
- */
- /* unop vA, vB */
- GET_OPB(a3) # a3 <- B
- GET_OPA4(t0) # t0 <- A+
- GET_VREG(a0, a3) # a0 <- vB
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- # optional op
- SEH(a0, a0) # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG_GOTO(a0, t0, t1) # vA <- result0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: mips/op_add_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: mips/op_sub_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: mips/op_mul_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: mips/op_div_int.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: mips/op_rem_int.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: mips/op_and_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: mips/op_or_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: mips/op_xor_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: mips/op_shl_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: mips/op_shr_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: mips/op_ushr_int.S */
-/* File: mips/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG(a1, a3) # a1 <- vCC
- GET_VREG(a0, a2) # a0 <- vBB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: mips/op_add_long.S */
-/*
- * The compiler generates the following sequence for
- * [v1 v0] = [a1 a0] + [a3 a2];
- * addu v0,a2,a0
- * addu a1,a3,a1
- * sltu v1,v0,a2
- * addu v1,v1,a1
- */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- addu v0, a2, a0 # optional op
- addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: mips/op_sub_long.S */
-/*
- * For little endian the code sequence looks as follows:
- * subu v0,a0,a2
- * subu v1,a1,a3
- * sltu a0,a0,v0
- * subu v1,v1,a0
- */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- subu v0, a0, a2 # optional op
- subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: mips/op_mul_long.S */
- /*
- * Signed 64-bit integer multiply.
- * a1 a0
- * x a3 a2
- * -------------
- * a2a1 a2a0
- * a3a0
- * a3a1 (<= unused)
- * ---------------
- * v1 v0
- */
- /* mul-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- and t0, a0, 255 # a2 <- BB
- srl t1, a0, 8 # a3 <- CC
- EAS2(t0, rFP, t0) # t0 <- &fp[BB]
- LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
-
- EAS2(t1, rFP, t1) # t0 <- &fp[CC]
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
-#endif
- mul t0, a2, a1 # t0= a2a1
- addu v1, v1, t1 # v1+= hi(a2a0)
- addu v1, v1, t0 # v1= a3a0 + a2a1;
-
- GET_OPA(a0) # a0 <- AA
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- b .Lop_mul_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: mips/op_div_long.S */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- JAL(__divdi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: mips/op_rem_long.S */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- JAL(__moddi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: mips/op_and_long.S */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- and a0, a0, a2 # optional op
- and a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: mips/op_or_long.S */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- or a0, a0, a2 # optional op
- or a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: mips/op_xor_long.S */
-/* File: mips/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a2-a3). Useful for integer division and modulus.
- *
- * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
- * xor-long
- *
- * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
- LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- xor a0, a0, a2 # optional op
- xor a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vAA/vAA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: mips/op_shl_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shl-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t2) # t2 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v1, a2, 0x20 # shift< shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .Lop_shl_long_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, t2, t0) # vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: mips/op_shr_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* shr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(t3) # t3 <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_shr_long_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/vAA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: mips/op_ushr_long.S */
- /*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance.
- */
- /* ushr-long vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a3, a0, 255 # a3 <- BB
- srl a0, a0, 8 # a0 <- CC
- EAS2(a3, rFP, a3) # a3 <- &fp[BB]
- GET_VREG(a2, a0) # a2 <- vCC
- LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_ushr_long_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vAA/vAA+1 <- v0/v1
-
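shr-long and ushr-long above share the shl-long structure; the only difference is whether the high word is shifted arithmetically (sra, sign bits fill in) or logically (srl, zeros fill in). A C sketch of both, assuming arithmetic behaviour for signed right shifts as on the compilers ART targets:

    #include <stdint.h>

    /* shr-long: arithmetic 64-bit right shift built from 32-bit pieces. */
    static void shr_long(uint32_t *lo, uint32_t *hi, uint32_t shift) {
        int32_t shi = (int32_t)*hi;       /* high word keeps its sign */
        shift &= 63;
        if (shift & 0x20) {
            *lo = (uint32_t)(shi >> (shift & 31));
            *hi = (uint32_t)(shi >> 31);  /* all sign bits */
        } else if (shift != 0) {
            *lo = (*lo >> shift) | ((uint32_t)shi << (32 - shift));
            *hi = (uint32_t)(shi >> shift);
        }
    }

    /* ushr-long: same shape, but the high word is shifted logically. */
    static void ushr_long(uint32_t *lo, uint32_t *hi, uint32_t shift) {
        shift &= 63;
        if (shift & 0x20) {
            *lo = *hi >> (shift & 31);
            *hi = 0;
        } else if (shift != 0) {
            *lo = (*lo >> shift) | (*hi << (32 - shift));
            *hi = *hi >> shift;
        }
    }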
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: mips/op_add_float.S */
-/* File: mips/fbinop.S */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- add.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: mips/op_sub_float.S */
-/* File: mips/fbinop.S */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- sub.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: mips/op_mul_float.S */
-/* File: mips/fbinop.S */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- mul.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: mips/op_div_float.S */
-/* File: mips/fbinop.S */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div.s fv0, fa0, fa1 # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: mips/op_rem_float.S */
-/* File: mips/fbinop.S */
- /*
- * Generic 32-bit binary float operation.
- *
- * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
- */
-
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- srl a3, a0, 8 # a3 <- CC
- and a2, a0, 255 # a2 <- BB
- GET_VREG_F(fa1, a3) # a1 <- vCC
- GET_VREG_F(fa0, a2) # a0 <- vBB
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- JAL(fmodf) # f0 = result
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vAA <- fv0
-
-
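The fbinop.S template above covers the five single-precision ops: add/sub/mul/div each map to one FPU instruction (add.s, sub.s, mul.s, div.s), while rem-float has no hardware counterpart and is lowered to the libc call JAL(fmodf). The same mapping in C, as a sketch of the semantics rather than the handler itself:

    #include <math.h>

    /* rem-float is the one fbinop case that needs a runtime call; the removed
     * handler uses fmodf, so the C equivalent is exactly this. */
    static float rem_float(float vBB, float vCC) { return fmodf(vBB, vCC); }

    /* The other four are single instructions; e.g. add-float is add.s: */
    static float add_float(float vBB, float vCC) { return vBB + vCC; }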
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: mips/op_add_double.S */
-/* File: mips/fbinopWide.S */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- add.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: mips/op_sub_double.S */
-/* File: mips/fbinopWide.S */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- sub.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: mips/op_mul_double.S */
-/* File: mips/fbinopWide.S */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- mul.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: mips/op_div_double.S */
-/* File: mips/fbinopWide.S */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- div.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: mips/op_rem_double.S */
-/* File: mips/fbinopWide.S */
- /*
- * Generic 64-bit floating-point binary operation. Provide an "instr"
- * line that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * for: add-double, sub-double, mul-double, div-double,
- * rem-double
- *
- */
- /* binop vAA, vBB, vCC */
- FETCH(a0, 1) # a0 <- CCBB
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a0, 255 # a2 <- BB
- srl a3, a0, 8 # a3 <- CC
- EAS2(a2, rFP, a2) # a2 <- &fp[BB]
- EAS2(t1, rFP, a3) # a3 <- &fp[CC]
- LOAD64_F(fa0, fa0f, a2)
- LOAD64_F(fa1, fa1f, t1)
-
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- JAL(fmod)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vAA/vAA+1 <- fv0
-
-
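fbinopWide.S is the double-precision twin of the template above; operands are loaded as even/odd FPU register pairs (fa0/fa0f), and rem-double again falls back to libc, this time via JAL(fmod). Sketch:

    #include <math.h>

    /* rem-double as lowered by the removed handler. */
    static double rem_double(double vBB, double vCC) { return fmod(vBB, vCC); }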
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: mips/op_add_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
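The /2addr templates starting here differ from the three-register forms only in encoding: vA is both destination and first source, and the instruction is one code unit long, hence FETCH_ADVANCE_INST(1) instead of (2). A minimal C sketch of add-int/2addr, with a hypothetical vregs[] array standing in for GET_VREG/SET_VREG:

    #include <stdint.h>

    extern uint32_t vregs[];   /* hypothetical register file accessor */

    /* add-int/2addr vA, vB: vA <- vA + vB, wrapping 32-bit add (addu). */
    static void add_int_2addr(unsigned A, unsigned B) {
        vregs[A] = vregs[A] + vregs[B];
    }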
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: mips/op_sub_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: mips/op_mul_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: mips/op_div_int_2addr.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
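div-int/2addr is instantiated with chkzero=1, so the .if 1 branch to common_errDivideByZero above is live: a zero divisor must raise ArithmeticException before the divide. The #ifdef also records the ISA split, MIPS32R6 having a three-operand div while older revisions divide into HI/LO and read the quotient back with mflo. A hedged C sketch of the check and divide, with a hypothetical throw helper; the INT32_MIN / -1 case (which Dalvik defines to yield INT32_MIN) is ignored here and would be undefined in plain C, so this really is only a sketch:

    #include <stdint.h>

    extern void throw_div_by_zero(void);   /* hypothetical stand-in for common_errDivideByZero */

    /* div-int/2addr semantics: check the divisor, then 32-bit signed divide. */
    static int32_t div_int(int32_t num, int32_t den) {
        if (den == 0) {
            throw_div_by_zero();
            return 0;                       /* not reached once the throw unwinds */
        }
        return num / den;
    }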
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: mips/op_rem_int_2addr.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: mips/op_and_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: mips/op_or_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: mips/op_xor_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: mips/op_shl_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: mips/op_shr_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: mips/op_ushr_int_2addr.S */
-/* File: mips/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call.
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a0, rOBJ) # a0 <- vA
- GET_VREG(a1, a3) # a1 <- vB
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: mips/op_add_long_2addr.S */
-/*
- * See op_add_long.S for details
- */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- addu v0, a2, a0 # optional op
- addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-
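add-long/2addr shows how the template's two instruction slots do a 64-bit add with only 32-bit registers: add the low words, recover the carry with sltu (the sum wrapped iff it is smaller than an addend), and fold the carry into the high-word add. The same trick as a C sketch:

    #include <stdint.h>

    /* Mirrors: addu v0, a2, a0
     *          addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 */
    static void add_long(uint32_t *lo, uint32_t *hi, uint32_t blo, uint32_t bhi) {
        uint32_t sum_lo = *lo + blo;
        uint32_t carry  = sum_lo < blo;   /* sltu: 1 iff the low add wrapped */
        *hi = *hi + bhi + carry;
        *lo = sum_lo;
    }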
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: mips/op_sub_long_2addr.S */
-/*
- * See op_sub_long.S for more details
- */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- subu v0, a0, a2 # optional op
- subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: mips/op_mul_long_2addr.S */
- /*
- * See op_mul_long.S for more details
- */
- /* mul-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
-
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # vAA.low / high
-
- GET_OPB(t1) # t1 <- B
- EAS2(t1, rFP, t1) # t1 <- &fp[B]
- LOAD64(a2, a3, t1) # vBB.low / high
-
- mul v1, a3, a0 # v1= a3a0
-#ifdef MIPS32REVGE6
- mulu v0, a2, a0 # v0= a2a0
- muhu t1, a2, a0
-#else
- multu a2, a0
- mfhi t1
- mflo v0 # v0= a2a0
-#endif
- mul t2, a2, a1 # t2= a2a1
- addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
- addu v1, v1, t2 # v1= v1 + a2a1;
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t1) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t1) # vA/vA+1 <- v0(low)/v1(high)
-
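mul-long/2addr builds the low 64 bits of a 64x64 product out of 32-bit multiplies: the full product of the low words (multu, or mulu/muhu on R6) contributes both halves, while the two cross terms only affect the high word. The identity the handler relies on, as a C sketch:

    #include <stdint.h>

    /* Low 64 bits of (ahi:alo) * (bhi:blo), mirroring the removed handler:
     * hi = bhi*alo + high32(blo*alo) + blo*ahi, lo = low32(blo*alo). */
    static void mul_long(uint32_t *lo, uint32_t *hi,
                         uint32_t alo, uint32_t ahi,
                         uint32_t blo, uint32_t bhi) {
        uint64_t p = (uint64_t)blo * alo;   /* multu a2, a0 */
        *lo = (uint32_t)p;                  /* mflo */
        *hi = bhi * alo                     /* mul v1, a3, a0 */
            + (uint32_t)(p >> 32)           /* mfhi */
            + blo * ahi;                    /* mul t2, a2, a1 */
    }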
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: mips/op_div_long_2addr.S */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- JAL(__divdi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: mips/op_rem_long_2addr.S */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 1
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- # optional op
- JAL(__moddi3) # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-
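32-bit MIPS has no 64-bit divide, so div-long/2addr and rem-long/2addr check for a zero divisor and then call the compiler runtime's soft-division helpers __divdi3 and __moddi3 (the standard libgcc/compiler-rt entry points for signed 64-bit division on 32-bit targets). In C the same operations are just the 64-bit / and % operators, which a 32-bit MIPS compiler lowers to those same helpers; sketched with the hypothetical throw helper from the div-int sketch:

    #include <stdint.h>

    extern void throw_div_by_zero(void);   /* hypothetical */

    static int64_t div_long(int64_t num, int64_t den) {
        if (den == 0) { throw_div_by_zero(); return 0; }
        return num / den;                   /* lowered to __divdi3 on 32-bit MIPS */
    }

    static int64_t rem_long(int64_t num, int64_t den) {
        if (den == 0) { throw_div_by_zero(); return 0; }
        return num % den;                   /* lowered to __moddi3 on 32-bit MIPS */
    }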
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: mips/op_and_long_2addr.S */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- and a0, a0, a2 # optional op
- and a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: mips/op_or_long_2addr.S */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- or a0, a0, a2 # optional op
- or a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: mips/op_xor_long_2addr.S */
-/* File: mips/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0-a1 op a2-a3".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register pair other than a0-a1, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a2-a3). Useful for integer division and modulus.
- *
- * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
- * and-long/2addr, or-long/2addr, xor-long/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64(a2, a3, a1) # a2/a3 <- vB/vB+1
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- .if 0
- or t0, a2, a3 # second arg (a2-a3) is zero?
- beqz t0, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- xor a0, a0, a2 # optional op
- xor a1, a1, a3 # result <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, rOBJ, t0) # vA/vA+1 <- a0/a1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: mips/op_shl_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t2, rFP, rOBJ) # t2 <- &fp[A]
- LOAD64(a0, a1, t2) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
-    andi    v1, a2, 0x20                  #  shift & 0x20
- sll v0, a0, a2 # rlo<- alo << (shift&31)
- bnez v1, .Lop_shl_long_2addr_finish
- not v1, a2 # rhi<- 31-shift (shift is 5b)
- srl a0, 1
- srl a0, v1 # alo<- alo >> (32-(shift&31))
- sll v1, a1, a2 # rhi<- ahi << (shift&31)
- or v1, a0 # rhi<- rhi | alo
- SET_VREG64_GOTO(v0, v1, rOBJ, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: mips/op_shr_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- GET_OPA4(t2) # t2 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t2) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- sra v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_shr_long_2addr_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-shift (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t2, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips/op_ushr_long_2addr.S */
- /*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- GET_OPA4(t3) # t3 <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG(a2, a3) # a2 <- vB
- EAS2(t0, rFP, t3) # t0 <- &fp[A]
- LOAD64(a0, a1, t0) # a0/a1 <- vA/vA+1
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
-
- andi v0, a2, 0x20 # shift & 0x20
- srl v1, a1, a2 # rhi<- ahi >> (shift&31)
- bnez v0, .Lop_ushr_long_2addr_finish
- srl v0, a0, a2 # rlo<- alo >> (shift&31)
- not a0, a2 # alo<- 31-n (shift is 5b)
- sll a1, 1
- sll a1, a0 # ahi<- ahi << (32-(shift&31))
- or v0, a1 # rlo<- rlo | ahi
- SET_VREG64_GOTO(v0, v1, t3, t0) # vA/vA+1 <- v0/v1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: mips/op_add_float_2addr.S */
-/* File: mips/fbinop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- add.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: mips/op_sub_float_2addr.S */
-/* File: mips/fbinop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- sub.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: mips/op_mul_float_2addr.S */
-/* File: mips/fbinop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- mul.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: mips/op_div_float_2addr.S */
-/* File: mips/fbinop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- div.s fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: mips/op_rem_float_2addr.S */
-/* File: mips/fbinop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr"
- * that specifies an instruction that performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
- * div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a3) # a3 <- B
- GET_VREG_F(fa0, rOBJ)
- GET_VREG_F(fa1, a3)
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
-
- JAL(fmodf)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_F_GOTO(fv0, rOBJ, t0) # vA <- result
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: mips/op_add_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- add.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: mips/op_sub_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- sub.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: mips/op_mul_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- mul.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: mips/op_div_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- div.d fv0, fa0, fa1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: mips/op_rem_double_2addr.S */
-/* File: mips/fbinopWide2addr.S */
- /*
- * Generic 64-bit floating-point "/2addr" binary operation.
- * Provide an "instr" line that specifies an instruction that
- * performs "fv0 = fa0 op fa1".
- * This could be an MIPS instruction or a function call.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
- * div-double/2addr, rem-double/2addr
- */
- /* binop/2addr vA, vB */
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_OPB(a1) # a1 <- B
- EAS2(a1, rFP, a1) # a1 <- &fp[B]
- EAS2(t0, rFP, rOBJ) # t0 <- &fp[A]
- LOAD64_F(fa0, fa0f, t0)
- LOAD64_F(fa1, fa1f, a1)
-
- FETCH_ADVANCE_INST(1) # advance rPC, load rINST
- JAL(fmod)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) # vA/vA+1 <- fv0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: mips/op_add_int_lit16.S */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: mips/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
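The lit16 family fetches its literal with FETCH_S, so CCCC arrives sign-extended, and rsub-int (which the file's comment notes is effectively rsub-int/lit16) is the one member with reversed operands: literal minus register, hence subu a0, a1, a0 above. A short C sketch of the decode and the rsub case:

    #include <stdint.h>

    /* binop/lit16 vA, vB, #+CCCC with the rsub operation plugged in. */
    static int32_t rsub_int(int32_t vB, uint16_t code_unit) {
        int32_t lit = (int16_t)code_unit;                 /* ssssCCCC: sign-extend */
        return (int32_t)((uint32_t)lit - (uint32_t)vB);   /* wrapping, like subu */
    }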
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: mips/op_mul_int_lit16.S */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: mips/op_div_int_lit16.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: mips/op_rem_int_lit16.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#else
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 1
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: mips/op_and_int_lit16.S */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: mips/op_or_int_lit16.S */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: mips/op_xor_int_lit16.S */
-/* File: mips/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, +CCCC */
- FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
- GET_OPB(a2) # a2 <- B
- GET_OPA4(rOBJ) # rOBJ <- A+
- GET_VREG(a0, a2) # a0 <- vB
- .if 0
- # cmp a1, 0; is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: mips/op_add_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips/op_rsub_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: mips/op_mul_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: mips/op_div_int_lit8.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mflo a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: mips/op_rem_int_lit8.S */
-#ifdef MIPS32REVGE6
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#else
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 1
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- div zero, a0, a1 # optional op
- mfhi a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-#endif
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: mips/op_and_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: mips/op_or_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: mips/op_xor_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: mips/op_shl_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: mips/op_shr_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips/op_ushr_int_lit8.S */
-/* File: mips/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, +CC */
- FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
- GET_OPA(rOBJ) # rOBJ <- AA
- and a2, a3, 255 # a2 <- BB
- GET_VREG(a0, a2) # a0 <- vBB
- sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
- .if 0
- # is second operand zero?
- beqz a1, common_errDivideByZero
- .endif
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
-
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: mips/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lw a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: mips/op_iget_wide_quick.S */
- /* iget-wide-quick vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1 # t0 <- a3 + a1
- LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(a0, a1, a2, t0) # fp[A] <- a0/a1
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: mips/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- FETCH(a1, 1) # a1 <- field byte offset
- EXPORT_PC()
- GET_VREG(a0, a2) # a0 <- object we're operating on
- JAL(artIGetObjectFromMterp) # v0 <- GetObj(obj, offset)
- lw a3, THREAD_EXCEPTION_OFFSET(rSELF)
- GET_OPA4(a2) # a2<- A+
- PREFETCH_INST(2) # load rINST
- bnez a3, MterpPossibleException # bail out
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_OBJECT_GOTO(v0, a2, t0) # fp[A] <- v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: mips/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sw a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: mips/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset@CCCC */
- GET_OPA4(a0) # a0 <- A(+)
- GET_OPB(a1) # a1 <- B
- GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
- # check object for null
- beqz a2, common_errNullObject # object was null
- EAS2(a3, rFP, a0) # a3 <- &fp[A]
- LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
- FETCH(a3, 1) # a3 <- field byte offset
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu a2, a2, a3 # obj.field (64 bits, aligned) <- a0/a1
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GET_OPCODE_TARGET(t0)
- STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
- JR(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: mips/op_iput_object_quick.S */
- /* For: iput-object-quick */
- /* op vA, vB, offset@CCCC */
- EXPORT_PC()
- addu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- JAL(MterpIputObjectQuick)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips/op_invoke_virtual_quick.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualQuick)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips/op_invoke_virtual_range_quick.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeVirtualQuickRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: mips/op_iput_boolean_quick.S */
-/* File: mips/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: mips/op_iput_byte_quick.S */
-/* File: mips/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sb a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: mips/op_iput_char_quick.S */
-/* File: mips/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: mips/op_iput_short_quick.S */
-/* File: mips/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- beqz a3, common_errNullObject # object was null
- GET_VREG(a0, a2) # a0 <- fp[A]
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- addu t0, a3, a1
- GET_INST_OPCODE(t1) # extract opcode from rINST
- GET_OPCODE_TARGET(t1)
- sh a0, 0(t0) # obj.field (8/16/32 bits) <- a0
- JR(t1) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: mips/op_iget_boolean_quick.S */
-/* File: mips/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lbu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: mips/op_iget_byte_quick.S */
-/* File: mips/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lb a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: mips/op_iget_char_quick.S */
-/* File: mips/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lhu a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: mips/op_iget_short_quick.S */
-/* File: mips/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- GET_OPB(a2) # a2 <- B
- GET_VREG(a3, a2) # a3 <- object we're operating on
- FETCH(a1, 1) # a1 <- field byte offset
- GET_OPA4(a2) # a2 <- A(+)
- # check object for null
- beqz a3, common_errNullObject # object was null
- addu t0, a3, a1
- lh a0, 0(t0) # a0 <- obj.field (8/16/32 bits)
- FETCH_ADVANCE_INST(2) # advance rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG_GOTO(a0, a2, t0) # fp[A] <- a0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: mips/op_unused_f3.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: mips/op_unused_f4.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: mips/op_unused_f5.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: mips/op_unused_f6.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: mips/op_unused_f7.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: mips/op_unused_f8.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: mips/op_unused_f9.S */
-/* File: mips/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: mips/op_invoke_polymorphic.S */
-/* File: mips/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokePolymorphic)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(4)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips/op_invoke_polymorphic_range.S */
-/* File: mips/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokePolymorphicRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(4)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: mips/op_invoke_custom.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeCustom)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: mips/op_invoke_custom_range.S */
-/* File: mips/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC()
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- JAL(MterpInvokeCustomRange)
- beqz v0, MterpException
- FETCH_ADVANCE_INST(3)
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: mips/op_const_method_handle.S */
-/* File: mips/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstMethodHandle) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: mips/op_const_method_type.S */
-/* File: mips/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC()
- FETCH(a0, 1) # a0 <- BBBB
- GET_OPA(a1) # a1 <- AA
- addu a2, rFP, OFF_FP_SHADOWFRAME # a2 <- shadow frame
- move a3, rSELF
- JAL(MterpConstMethodType) # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
- PREFETCH_INST(2) # load rINST
- bnez v0, MterpPossibleException
- ADVANCE(2) # advance rPC
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-
- .balign 128
-/* File: mips/instruction_end.S */
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: mips/instruction_start_sister.S */
-
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
-
-
-/* continuation for op_float_to_long */
-
-#ifndef MIPS32REVGE6
-.Lop_float_to_long_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.Lop_float_to_long_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
-
-/* continuation for op_double_to_long */
-
-#ifndef MIPS32REVGE6
-.Lop_double_to_long_get_opcode:
- GET_INST_OPCODE(t1) # extract opcode from rINST
-.Lop_double_to_long_set_vreg:
- SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1) # vA/vA+1 <- v0/v1
-#endif
-
-/* continuation for op_mul_long */
-
-.Lop_mul_long_finish:
- GET_INST_OPCODE(t0) # extract opcode from rINST
- SET_VREG64_GOTO(v0, v1, a0, t0) # vAA/vAA+1 <- v0(low)/v1(high)
-
-/* continuation for op_shl_long */
-
-.Lop_shl_long_finish:
- SET_VREG64_GOTO(zero, v0, t2, t0) # vAA/vAA+1 <- rlo/rhi
-
-/* continuation for op_shr_long */
-
-.Lop_shr_long_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t3, t0) # vAA/VAA+1 <- rlo/rhi
-
-/* continuation for op_ushr_long */
-
-.Lop_ushr_long_finish:
- SET_VREG64_GOTO(v1, zero, rOBJ, t0) # vAA/vAA+1 <- rlo/rhi
-
-/* continuation for op_shl_long_2addr */
-
-.Lop_shl_long_2addr_finish:
- SET_VREG64_GOTO(zero, v0, rOBJ, t0) # vA/vA+1 <- rlo/rhi
-
-/* continuation for op_shr_long_2addr */
-
-.Lop_shr_long_2addr_finish:
- sra a3, a1, 31 # a3<- sign(ah)
- SET_VREG64_GOTO(v1, a3, t2, t0) # vA/vA+1 <- rlo/rhi
-
-/* continuation for op_ushr_long_2addr */
-
-.Lop_ushr_long_2addr_finish:
- SET_VREG64_GOTO(v1, zero, t3, t0) # vA/vA+1 <- rlo/rhi
-/* File: mips/instruction_end_sister.S */
-
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: mips/instruction_start_alt.S */
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (0 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (1 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (2 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (3 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (4 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (5 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (6 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (7 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (8 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (9 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (10 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (11 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (12 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (13 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (14 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (15 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (16 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (17 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (18 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (19 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (20 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (21 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (22 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (23 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (24 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (25 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (26 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (27 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (28 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (29 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (30 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (31 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (32 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (33 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (34 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (35 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (36 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (37 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (38 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (39 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (40 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (41 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (42 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (43 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (44 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (45 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (46 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (47 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (48 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (49 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (50 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (51 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (52 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (53 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (54 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (55 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (56 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (57 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (58 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (59 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (60 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (61 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (62 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (63 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (64 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (65 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (66 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (67 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (68 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (69 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (70 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (71 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (72 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (73 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (74 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (75 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (76 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (77 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (78 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (79 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (80 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (81 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (82 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (83 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (84 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (85 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (86 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (87 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (88 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (89 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (90 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (91 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (92 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (93 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (94 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (95 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (96 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (97 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (98 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (99 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (100 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (101 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (102 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (103 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (104 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (105 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (106 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (107 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (108 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (109 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (110 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (111 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (112 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (113 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (114 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (115 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (116 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (117 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (118 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (119 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (120 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (121 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (122 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (123 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (124 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (125 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (126 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (127 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (128 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (129 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (130 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (131 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (132 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (133 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (134 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (135 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (136 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (137 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (138 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (139 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (140 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (141 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (142 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (143 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (144 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (145 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (146 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (147 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (148 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (149 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (150 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (151 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (152 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (153 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (154 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (155 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (156 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (157 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (158 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (159 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (160 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (161 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (162 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (163 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (164 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (165 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (166 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (167 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (168 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (169 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (170 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (171 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (172 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (173 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (174 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (175 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (176 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (177 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (178 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (179 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (180 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (181 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (182 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (183 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (184 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (185 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (186 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (187 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (188 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (189 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (190 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (191 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (192 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (193 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (194 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (195 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (196 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (197 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (198 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (199 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (200 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (201 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (202 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (203 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (204 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (205 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (206 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (207 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (208 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (209 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (210 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (211 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (212 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (213 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (214 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (215 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (216 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (217 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (218 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (219 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (220 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (221 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (222 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (223 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (224 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (225 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (226 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (227 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (228 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (229 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (230 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (231 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (232 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (233 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (234 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (235 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (236 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (237 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (238 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (239 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (240 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (241 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (242 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (243 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (244 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (245 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (246 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (247 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (248 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (249 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (250 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (251 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (252 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (253 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (254 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: mips/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- la ra, artMterpAsmInstructionStart + (255 * 128) # Addr of primary handler
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) # refresh IBASE
- move a0, rSELF # arg0
- addu a1, rFP, OFF_FP_SHADOWFRAME # arg1
- move a2, rPC
- la t9, MterpCheckBefore
- jalr zero, t9 # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
- .balign 128
-/* File: mips/instruction_end_alt.S */
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: mips/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogDivideByZeroException)
-#endif
- b MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogArrayIndexException)
-#endif
- b MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNegativeArraySizeException)
-#endif
- b MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNoSuchMethodException)
-#endif
- b MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogNullObjectException)
-#endif
- b MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogExceptionThrownException)
-#endif
- b MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- lw a2, THREAD_FLAGS_OFFSET(rSELF)
- JAL(MterpLogSuspendFallback)
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- lw a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqz a0, MterpFallback # If no pending exception, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here, or do we need to bail out to the caller?
- *
- */
-MterpException:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpHandleException) # (self, shadow_frame)
- beqz v0, MterpExceptionReturn # no local catch, back to caller.
- lw a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lw a1, OFF_FP_DEX_PC(rFP)
- lw rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
- EAS1(rPC, a0, a1) # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- JAL(MterpShouldSwitchInterpreters)
- bnez v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC()
- FETCH_INST()
- GET_INST_OPCODE(t0)
- GOTO_OPCODE(t0)
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
- bgtz rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- li t0, JIT_CHECK_OSR
- beq rPROFILE, t0, .L_osr_check
- blt rPROFILE, t0, .L_resume_backward_branch
- subu rPROFILE, 1
- beqz rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE()
- addu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnez ra, .L_suspend_request_pending
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC()
- move a0, rSELF
- JAL(MterpSuspendCheck) # (self)
- bnez v0, MterpFallback
- REFRESH_IBASE() # might have changed during suspend
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_no_count_backwards:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- bne rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li t0, JIT_CHECK_OSR # check for possible OSR re-entry
- beq rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
- add a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- addu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- lw a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when an OSR check request is active on the
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC()
- JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- bnez v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST(2)
- GET_INST_OPCODE(t0) # extract opcode from rINST
- GOTO_OPCODE(t0) # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- JAL(MterpLogOSR)
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC()
-#if MTERP_LOGGING
- move a0, rSELF
- addu a1, rFP, OFF_FP_SHADOWFRAME
- JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- move v0, zero # signal retry with reference interpreter.
- b MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA. Here we restore SP, restore the registers, and then return
- * through RA.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-MterpReturn:
- lw a2, OFF_FP_RESULT_REGISTER(rFP)
- sw v0, 0(a2)
- sw v1, 4(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- lw a0, OFF_FP_METHOD(rFP)
- addu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- JAL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
- STACK_LOAD_FULL()
- jalr zero, ra
-
- .cfi_endproc
- .end ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
deleted file mode 100644
index 6561691..0000000
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ /dev/null
@@ -1,12479 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'mips64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: mips64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $0 /* always zero */
-#define AT $at /* assembler temp */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define a4 $8 /* expanded register arguments */
-#define a5 $9
-#define a6 $10
-#define a7 $11
-#define ta0 $8 /* alias */
-#define ta1 $9
-#define ta2 $10
-#define ta3 $11
-#define t0 $12 /* temp registers (not saved across subroutine calls) */
-#define t1 $13
-#define t2 $14
-#define t3 $15
-
-#define s0 $16 /* saved across subroutine calls (callee saved) */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* two more temp registers */
-#define t9 $25
-#define k0 $26 /* kernel temporary */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define s8 $30 /* one more callee saved */
-#define ra $31 /* return address */
-
-#define f0 $f0
-#define f1 $f1
-#define f2 $f2
-#define f3 $f3
-#define f12 $f12
-#define f13 $f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
- bgec \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
- bltc \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
- reg nick purpose
- s0 rPC interpreted program counter, used for fetching instructions
- s1 rFP interpreted frame pointer, used for accessing locals and args
- s2 rSELF self (Thread) pointer
- s3 rINST first 16-bit code unit of current instruction
- s4 rIBASE interpreted instruction base pointer, used for computed goto
- s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
- s6 rPROFILE jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16 // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4 // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rINST s3
-#define rIBASE s4
-#define rREFS s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- sd rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST. Does not advance rPC.
- */
-.macro FETCH_INST
- lhu rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
- daddu rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance rPC
- * to point to the next instruction. "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value. Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
- daddu rPC, rPC, \reg
- FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset. Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss. (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
- ADVANCE \count
- FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
- * rINST ahead of possible exception point. Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
- lhu rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
- and \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
- .set noat
- sll AT, \reg, 7
- daddu AT, rIBASE, AT
- jic AT, 0
- .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_U reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwu \reg, 0(AT)
- .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw \reg, 0(AT)
- .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lw \reg, 0(AT)
- lw AT, 4(AT)
- dinsu \reg, AT, 32, 32
- .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- lwc1 \reg, 0(AT)
- lw AT, 4(AT)
- mthc1 AT, \reg
- .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
- .set noat
- dlsa AT, \vreg, rFP, 2
- sw \reg, 0(AT)
- drotr32 \reg, \reg, 0
- sw \reg, 4(AT)
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
- .set noat
- dlsa AT, \vreg, rREFS, 2
- sw zero, 0(AT)
- sw zero, 4(AT)
- dlsa AT, \vreg, rFP, 2
- swc1 \reg, 0(AT)
- mfhc1 \vreg, \reg
- sw \vreg, 4(AT)
- .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE 80 /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN 0x80000000
-#define INT_MIN_AS_FLOAT 0xCF000000
-#define INT_MIN_AS_DOUBLE 0xC1E0000000000000
-#define LONG_MIN 0x8000000000000000
-#define LONG_MIN_AS_FLOAT 0xDF000000
-#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000
-
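These are the bit patterns of the smallest int/long values expressed as float/double; the conversion handlers compare against them so that NaN and out-of-range inputs yield the saturated results Dalvik requires. A rough C sketch of the intended semantics for float-to-int (illustrative only, not the handler code):

    #include <stdint.h>
    #include <math.h>

    /* Illustrative only: Dalvik float-to-int narrows with saturation and
     * maps NaN to 0; INT_MIN_AS_FLOAT is the bit pattern of -2^31. */
    static int32_t dalvik_f2i(float f) {
        if (isnan(f)) return 0;
        if (f >= 2147483648.0f) return INT32_MAX;    /* too large: saturate */
        if (f <= -2147483648.0f) return INT32_MIN;   /* too small: saturate */
        return (int32_t)f;                           /* in range: truncate  */
    }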
-/* File: mips64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
- .set reorder
-
- .text
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
- .balign 16
-/*
- * On entry:
- * a0 Thread* self
- * a1 dex_instructions
- * a2 ShadowFrame
- * a3 JValue* result_register
- *
- */
-ExecuteMterpImpl:
- .cfi_startproc
- .cpsetup t9, t8, ExecuteMterpImpl
-
- .cfi_def_cfa sp, 0
- daddu sp, sp, -STACK_SIZE
- .cfi_adjust_cfa_offset STACK_SIZE
-
- sd t8, STACK_OFFSET_GP(sp)
- .cfi_rel_offset 28, STACK_OFFSET_GP
- sd ra, STACK_OFFSET_RA(sp)
- .cfi_rel_offset 31, STACK_OFFSET_RA
-
- sd s0, STACK_OFFSET_S0(sp)
- .cfi_rel_offset 16, STACK_OFFSET_S0
- sd s1, STACK_OFFSET_S1(sp)
- .cfi_rel_offset 17, STACK_OFFSET_S1
- sd s2, STACK_OFFSET_S2(sp)
- .cfi_rel_offset 18, STACK_OFFSET_S2
- sd s3, STACK_OFFSET_S3(sp)
- .cfi_rel_offset 19, STACK_OFFSET_S3
- sd s4, STACK_OFFSET_S4(sp)
- .cfi_rel_offset 20, STACK_OFFSET_S4
- sd s5, STACK_OFFSET_S5(sp)
- .cfi_rel_offset 21, STACK_OFFSET_S5
- sd s6, STACK_OFFSET_S6(sp)
- .cfi_rel_offset 22, STACK_OFFSET_S6
-
- /* Remember the return register */
- sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
- /* Remember the dex instruction pointer */
- sd a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
- /* set up "named" registers */
- move rSELF, a0
- daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET
- lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
- dlsa rREFS, v0, rFP, 2
- lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
- dlsa rPC, v0, a1, 1
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* Set up for backwards branches & osr profiling */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- jal MterpSetUpHotnessCountdown
- move rPROFILE, v0 # Starting hotness countdown to rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /* NOTE: no fallthrough */
-
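The entry sequence above derives the "named" registers from the incoming ShadowFrame: rFP points at the vregs array, rREFS at the reference array that immediately follows it (number_of_vregs entries later), and rPC at the current instruction inside the dex code item. A rough C sketch (illustrative only; the struct is a stand-in, not the real ShadowFrame layout):

    #include <stdint.h>

    /* Illustrative only: stand-in for the ShadowFrame fields used at entry. */
    struct shadow_frame_sketch {
        uint32_t number_of_vregs;
        uint32_t dex_pc;                 /* in 16-bit code units */
        uint32_t vregs[];                /* vregs, then the reference array */
    };

    static void setup_named_regs(struct shadow_frame_sketch *sf,
                                 const uint16_t *insns,
                                 uint32_t **rfp, uint32_t **rrefs,
                                 const uint16_t **rpc) {
        *rfp   = sf->vregs;                           /* rFP   <- &vregs[0]       */
        *rrefs = sf->vregs + sf->number_of_vregs;     /* rREFS <- rFP + num_vregs */
        *rpc   = insns + sf->dex_pc;                  /* rPC   <- insns + dex_pc  */
    }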
-/* File: mips64/instruction_start.S */
-
- .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: mips64/op_nop.S */
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: mips64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: mips64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: mips64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAAAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: mips64/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- ext a3, rINST, 12, 4 # a3 <- B
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG_WIDE a0, a3 # a0 <- vB
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: mips64/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: mips64/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- GET_VREG_WIDE a0, a3 # a0 <- vBBBB
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: mips64/op_move_object.S */
-/* File: mips64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: mips64/op_move_object_from16.S */
-/* File: mips64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- lhu a3, 2(rPC) # a3 <- BBBB
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: mips64/op_move_object_16.S */
-/* File: mips64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- lhu a3, 4(rPC) # a3 <- BBBB
- lhu a2, 2(rPC) # a2 <- AAAA
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vBBBB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB
- .else
- SET_VREG a0, a2 # vAAAA <- vBBBB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: mips64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vAA <- result
- .else
- SET_VREG a0, a2 # vAA <- result
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: mips64/op_move_result_wide.S */
- /* for: move-result-wide */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- ld a0, 0(a0) # a0 <- result.j
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: mips64/op_move_result_object.S */
-/* File: mips64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- srl a2, rINST, 8 # a2 <- AA
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType
- lw a0, 0(a0) # a0 <- result.i
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 1
- SET_VREG_OBJECT a0, a2 # vAA <- result
- .else
- SET_VREG a0, a2 # vAA <- result
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: mips64/op_move_exception.S */
- /* move-exception vAA */
- srl a2, rINST, 8 # a2 <- AA
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- SET_VREG_OBJECT a0, a2 # vAA <- exception obj
- GET_INST_OPCODE v0 # extract opcode from rINST
- sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: mips64/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: mips64/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return (sign-extend), return-object (zero-extend)
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a2 # a0 <- vAA
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: mips64/op_return_wide.S */
- /*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vAA
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: mips64/op_return_object.S */
-/* File: mips64/op_return.S */
- /*
- * Return a 32-bit value.
- *
- * for: return (sign-extend), return-object (zero-extend)
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- .extern MterpSuspendCheck
- jal MterpThreadFenceForConstructor
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
- b MterpReturn
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: mips64/op_const_4.S */
- /* const/4 vA, #+B */
- ext a2, rINST, 8, 4 # a2 <- A
- seh a0, rINST # sign extend B in rINST
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- sra a0, a0, 12 # shift B into its final position
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- +B
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: mips64/op_const_16.S */
- /* const/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: mips64/op_const.S */
- /* const vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: mips64/op_const_high16.S */
- /* const/high16 vAA, #+BBBB0000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- sll a0, a0, 16 # a0 <- BBBB0000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- +BBBB0000
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: mips64/op_const_wide_16.S */
- /* const-wide/16 vAA, #+BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- sign-extended BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: mips64/op_const_wide_32.S */
- /* const-wide/32 vAA, #+BBBBbbbb */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (high)
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: mips64/op_const_wide.S */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- srl a4, rINST, 8 # a4 <- AA
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a1, 4(rPC) # a1 <- BBBB (low middle)
- lh a2, 6(rPC) # a2 <- hhhh (high middle)
- lh a3, 8(rPC) # a3 <- HHHH (high)
- FETCH_ADVANCE_INST 5 # advance rPC, load rINST
- ins a0, a1, 16, 16 # a0 = BBBBbbbb
- ins a2, a3, 16, 16 # a2 = HHHHhhhh
- dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb
- GOTO_OPCODE v0 # jump to next instruction
-
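op_const_wide reassembles the 64-bit immediate from the four 16-bit code units that follow the opcode, using ins/dinsu to merge the halves. The same reassembly in C (illustrative only):

    #include <stdint.h>

    /* Illustrative only: rebuild #+HHHHhhhhBBBBbbbb from the code units at
     * rPC+1 .. rPC+4, low half first. */
    static int64_t const_wide_operand(const uint16_t *pc) {
        uint64_t bbbb = pc[1];                         /* low         */
        uint64_t BBBB = pc[2];                         /* low middle  */
        uint64_t hhhh = pc[3];                         /* high middle */
        uint64_t HHHH = pc[4];                         /* high        */
        return (int64_t)(bbbb | (BBBB << 16) | (hhhh << 32) | (HHHH << 48));
    }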
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: mips64/op_const_wide_high16.S */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- srl a2, rINST, 8 # a2 <- AA
- lh a0, 2(rPC) # a0 <- BBBB
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- dsll32 a0, a0, 16 # a0 <- BBBB000000000000
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: mips64/op_const_string.S */
-/* File: mips64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@BBBB */
- .extern MterpConstString
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: mips64/op_const_string_jumbo.S */
- /* const/string vAA, String//BBBBBBBB */
- .extern MterpConstString
- EXPORT_PC
- lh a0, 2(rPC) # a0 <- bbbb (low)
- lh a4, 4(rPC) # a4 <- BBBB (high)
- srl a1, rINST, 8 # a1 <- AA
- ins a0, a4, 16, 16 # a0 <- BBBBbbbb
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstString # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 3 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 3 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: mips64/op_const_class.S */
-/* File: mips64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstClass # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: mips64/op_monitor_enter.S */
- /*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- .extern artLockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artLockObjectFromCode
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: mips64/op_monitor_exit.S */
- /*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- .extern artUnlockObjectFromCode
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (object)
- move a1, rSELF # a1 <- self
- jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj)
- bnezc v0, MterpException
- FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: mips64/op_check_cast.S */
- /*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class//BBBB */
- .extern MterpCheckCast
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpCheckCast # (index, &obj, method, self)
- PREFETCH_INST 2
- bnez v0, MterpPossibleException
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: mips64/op_instance_of.S */
- /*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class//CCCC */
- .extern MterpInstanceOf
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- CCCC
- srl a1, rINST, 12 # a1 <- B
- dlsa a1, a1, rFP, 2 # a1 <- &object
- ld a2, OFF_FP_METHOD(rFP) # a2 <- method
- move a3, rSELF # a3 <- self
- jal MterpInstanceOf # (index, &obj, method, self)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a1, MterpException
- ADVANCE 2 # advance rPC
- SET_VREG v0, a2 # vA <- v0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: mips64/op_array_length.S */
- /*
- * Return the length of an array.
- */
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a0, a1 # a0 <- vB (object ref)
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a0, common_errNullObject # yup, fail
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length
- GET_INST_OPCODE v0 # extract opcode from rINST
-    SET_VREG a3, a2                     # vA <- length
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: mips64/op_new_instance.S */
- /*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class//BBBB */
- .extern MterpNewInstance
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rSELF
- move a2, rINST
- jal MterpNewInstance # (shadow_frame, self, inst_data)
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: mips64/op_new_array.S */
- /*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class//CCCC */
- .extern MterpNewArray
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpNewArray
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: mips64/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rSELF
- jal MterpFilledNewArray
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: mips64/op_filled_new_array_range.S */
-/* File: mips64/op_filled_new_array.S */
- /*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rSELF
- jal MterpFilledNewArrayRange
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: mips64/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- .extern MterpFillArrayData
- EXPORT_PC
- lh a1, 2(rPC) # a1 <- bbbb (lo)
- lh a0, 4(rPC) # a0 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a1, a0, 16, 16 # a1 <- BBBBbbbb
- GET_VREG_U a0, a3 # a0 <- vAA (array object)
- dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.)
- jal MterpFillArrayData # (obj, payload)
- beqzc v0, MterpPossibleException # exception?
- FETCH_ADVANCE_INST 3 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: mips64/op_throw.S */
- /*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA (exception object)
- beqzc a0, common_errNullObject
- sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj
- b MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: mips64/op_goto.S */
- /*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- srl rINST, rINST, 8
- seb rINST, rINST # rINST <- offset (sign-extended AA)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: mips64/op_goto_16.S */
- /*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: mips64/op_goto_32.S */
- /*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0".
- */
- /* goto/32 +AAAAAAAA */
- lh rINST, 2(rPC) # rINST <- aaaa (low)
- lh a1, 4(rPC) # a1 <- AAAA (high)
- ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa)
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: mips64/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBBBBBB */
- .extern MterpDoPackedSwitch
- lh a0, 2(rPC) # a0 <- bbbb (lo)
- lh a1, 4(rPC) # a1 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a0, a1, 16, 16 # a0 <- BBBBbbbb
- GET_VREG a1, a3 # a1 <- vAA
- dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
- jal MterpDoPackedSwitch # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: mips64/op_sparse_switch.S */
-/* File: mips64/op_packed_switch.S */
- /*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBBBBBB */
- .extern MterpDoSparseSwitch
- lh a0, 2(rPC) # a0 <- bbbb (lo)
- lh a1, 4(rPC) # a1 <- BBBB (hi)
- srl a3, rINST, 8 # a3 <- AA
- ins a0, a1, 16, 16 # a0 <- BBBBbbbb
- GET_VREG a1, a3 # a1 <- vAA
- dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2
- jal MterpDoSparseSwitch # v0 <- code-unit branch offset
- move rINST, v0
- b MterpCommonTakenBranchNoFlags
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: mips64/op_cmpl_float.S */
-/* File: mips64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- cmp.eq.s f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.s f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
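The fcmp.S template is expanded twice; the only difference is the operand order of the second comparison, which decides whether an unordered (NaN) input yields -1 (cmpl-float, above) or +1 (cmpg-float, below). The double variants at 0x2f/0x30 follow the same pattern. In C terms (illustrative only):

    /* Illustrative only: cmpl returns -1 on NaN, cmpg returns +1; otherwise
     * both produce the usual -1/0/+1 ordering. */
    static int fcmp(float a, float b, int nan_result) {
        if (a == b) return 0;
        if (a < b)  return -1;
        if (a > b)  return 1;
        return nan_result;    /* unordered: -1 for cmpl, +1 for cmpg */
    }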
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: mips64/op_cmpg_float.S */
-/* File: mips64/fcmp.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-float, cmpg-float
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- cmp.eq.s f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.s f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.s f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: mips64/op_cmpl_double.S */
-/* File: mips64/fcmpWide.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- cmp.eq.d f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 0
- cmp.lt.d f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: mips64/op_cmpg_double.S */
-/* File: mips64/fcmpWide.S */
- /*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * For: cmpl-double, cmpg-double
- */
- /* op vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- cmp.eq.d f2, f0, f1
- li a0, 0
- bc1nez f2, 1f # done if vBB == vCC (ordered)
- .if 1
- cmp.lt.d f2, f0, f1
- li a0, -1
- bc1nez f2, 1f # done if vBB < vCC (ordered)
- li a0, 1 # vBB > vCC or unordered
- .else
- cmp.lt.d f2, f1, f0
- li a0, 1
- bc1nez f2, 1f # done if vBB > vCC (ordered)
- li a0, -1 # vBB < vCC or unordered
- .endif
-1:
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: mips64/op_cmp_long.S */
- /* cmp-long vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- slt a2, a0, a1
- slt a0, a1, a0
- subu a0, a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- result
- GOTO_OPCODE v0 # jump to next instruction
-
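The slt/slt/subu sequence in op_cmp_long computes (vBB > vCC) - (vBB < vCC), giving -1, 0 or +1 without branches and without the overflow risk of subtracting the operands directly. The same idea in C (illustrative only):

    #include <stdint.h>

    /* Illustrative only: branchless three-way compare, as used by cmp-long. */
    static int32_t cmp_long(int64_t a, int64_t b) {
        return (int32_t)((a > b) - (a < b));    /* -1, 0, or +1 */
    }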
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: mips64/op_if_eq.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- beqc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: mips64/op_if_ne.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bnec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: mips64/op_if_lt.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bltc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: mips64/op_if_ge.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bgec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: mips64/op_if_gt.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- bgtc a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: mips64/op_if_le.S */
-/* File: mips64/bincmp.S */
- /*
- * Generic two-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-le" you would use "le".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC)
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- blec a0, a1, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: mips64/op_if_eqz.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- beqzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: mips64/op_if_nez.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bnezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: mips64/op_if_ltz.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bltzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: mips64/op_if_gez.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bgezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: mips64/op_if_gtz.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- bgtzc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: mips64/op_if_lez.S */
-/* File: mips64/zcmp.S */
- /*
- * Generic one-operand compare-and-branch operation. Provide a "condition"
- * fragment that specifies the comparison to perform, e.g. for
- * "if-lez" you would use "le".
- *
- * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- srl a2, rINST, 8 # a2 <- AA
- lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB)
- GET_VREG a0, a2 # a0 <- vAA
- blezc a0, MterpCommonTakenBranchNoFlags
- li v0, JIT_CHECK_OSR # possible OSR re-entry?
- beqc rPROFILE, v0, .L_check_not_taken_osr
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: mips64/op_unused_3e.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: mips64/op_unused_3f.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: mips64/op_unused_40.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: mips64/op_unused_41.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: mips64/op_unused_42.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: mips64/op_unused_43.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: mips64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 2
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
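The aget template performs a null check on the array object, scales the index by the element width (or adds it unscaled for byte-sized elements), and uses an unsigned compare of index against length so that negative indices also take the error path. A rough C sketch of those checks for the 32-bit case (illustrative only; the helper name and return codes are invented):

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: the null/bounds checks performed before the load.
     * Returns 0 on success, nonzero to stand in for the error branches. */
    static int aget_i32(const int32_t *array_data, uint32_t length,
                        uint32_t index, int32_t *out) {
        if (array_data == NULL) return 1;   /* common_errNullObject            */
        if (index >= length)    return 2;   /* common_errArrayIndex (unsigned) */
        *out = array_data[index];           /* lw from the array data offset   */
        return 0;
    }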
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: mips64/op_aget_wide.S */
- /*
- * Array get, 64 bits. vAA <- vBB[vCC].
- *
- */
- /* aget-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
- dinsu a2, a3, 32, 32 # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: mips64/op_aget_object.S */
- /*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- .extern artAGetObjectFromMterp
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- jal artAGetObjectFromMterp # (array, index)
- ld a1, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a4, rINST, 8 # a4 <- AA
- PREFETCH_INST 2
- bnez a1, MterpException
- SET_VREG_OBJECT v0, a4 # vAA <- v0
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: mips64/op_aget_boolean.S */
-/* File: mips64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: mips64/op_aget_byte.S */
-/* File: mips64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: mips64/op_aget_char.S */
-/* File: mips64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: mips64/op_aget_short.S */
-/* File: mips64/op_aget.S */
- /*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- * NOTE: assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC]
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a2, a4 # vAA <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: mips64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 2
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: mips64/op_aput_wide.S */
- /*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- GET_VREG_WIDE a2, a4 # a2 <- vAA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
- dsrl32 a2, a2, 0
- sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: mips64/op_aput_object.S */
- /*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- .extern MterpAputObject
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpAputObject
- beqzc v0, MterpPossibleException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: mips64/op_aput_boolean.S */
-/* File: mips64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: mips64/op_aput_byte.S */
-/* File: mips64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 0
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: mips64/op_aput_char.S */
-/* File: mips64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: mips64/op_aput_short.S */
-/* File: mips64/op_aput.S */
- /*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- * NOTE: this assumes data offset for arrays is the same for all non-wide types.
- * If this changes, specialize.
- */
- /* op vAA, vBB, vCC */
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- srl a4, rINST, 8 # a4 <- AA
- GET_VREG_U a0, a2 # a0 <- vBB (array object)
- GET_VREG a1, a3 # a1 <- vCC (requested index)
- beqz a0, common_errNullObject # bail if null array object
- lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length
- .if 1
- # [d]lsa does not support shift count of 0.
- dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width
- .else
- daddu a0, a1, a0 # a0 <- arrayObj + index*width
- .endif
- bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_VREG a2, a4 # a2 <- vAA
- GET_INST_OPCODE v0 # extract opcode from rINST
- sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetU32
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetU32
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: mips64/op_iget_wide.S */
- /*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- .extern MterpIGetU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetU64
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- SET_VREG_WIDE v0, a2 # fp[A] <- v0
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: mips64/op_iget_object.S */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetObj
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetObj
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 1
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: mips64/op_iget_boolean.S */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetU8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetU8
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: mips64/op_iget_byte.S */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetI8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetI8
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: mips64/op_iget_char.S */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetU16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetU16
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: mips64/op_iget_short.S */
-/* File: mips64/op_iget.S */
- /*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- .extern MterpIGetI16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer
- move a3, rSELF # a3 <- self
- jal MterpIGetI16
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- .else
- SET_VREG v0, a2 # fp[A] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: mips64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutU32
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutU32
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: mips64/op_iput_wide.S */
- /* iput-wide vA, vB, field//CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- dlsa a2, a2, rFP, 2 # a2 <- &fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutU64
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: mips64/op_iput_object.S */
- .extern MterpIPutObj
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpIPutObj
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: mips64/op_iput_boolean.S */
-/* File: mips64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutU8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutU8
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: mips64/op_iput_byte.S */
-/* File: mips64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutI8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutI8
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: mips64/op_iput_char.S */
-/* File: mips64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutU16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutU16
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: mips64/op_iput_short.S */
-/* File: mips64/op_iput.S */
- /*
- * General 32-bit instance field put.
- *
- * for: iput, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field//CCCC */
- .extern MterpIPutI16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref CCCC
- srl a1, rINST, 12 # a1 <- B
- GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- GET_VREG a2, a2 # a2 <- fp[A]
- ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer
- PREFETCH_INST 2
- jal MterpIPutI16
- bnez v0, MterpPossibleException # bail out
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetU32
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetU32
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
-
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: mips64/op_sget_wide.S */
- /*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field//BBBB */
- .extern MterpSGetU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetU64
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a4, rINST, 8 # a4 <- AA
- bnez a3, MterpException # bail out
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG_WIDE v0, a4
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: mips64/op_sget_object.S */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetObj
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetObj
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
-
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 1
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: mips64/op_sget_boolean.S */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetU8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetU8
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
- and v0, v0, 0xff
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: mips64/op_sget_byte.S */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetI8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetI8
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
- seb v0, v0
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: mips64/op_sget_char.S */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetU16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetU16
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
- and v0, v0, 0xffff
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: mips64/op_sget_short.S */
-/* File: mips64/op_sget.S */
- /*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSGetI16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- ld a1, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpSGetI16
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- srl a2, rINST, 8 # a2 <- AA
- seh v0, v0
- PREFETCH_INST 2
- bnez a3, MterpException # bail out
- .if 0
- SET_VREG_OBJECT v0, a2 # fp[AA] <- v0
- .else
- SET_VREG v0, a2 # fp[AA] <- v0
- .endif
- ADVANCE 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: mips64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSPutU32
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutU32
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: mips64/op_sput_wide.S */
- /*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field//BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
-    srl     a1, rINST, 8                # a1 <- AA
- dlsa a1, a1, rFP, 2
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutU64
-    bnezc   v0, MterpException          # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: mips64/op_sput_object.S */
- .extern MterpSPutObj
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- move a3, rSELF
- jal MterpSPutObj
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: mips64/op_sput_boolean.S */
-/* File: mips64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSPutU8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutU8
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: mips64/op_sput_byte.S */
-/* File: mips64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSPutI8
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutI8
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: mips64/op_sput_char.S */
-/* File: mips64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSPutU16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutU16
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: mips64/op_sput_short.S */
-/* File: mips64/op_sput.S */
- /*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field//BBBB */
- .extern MterpSPutI16
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- field ref BBBB
- srl a3, rINST, 8 # a3 <- AA
- GET_VREG a1, a3 # a1 <- fp[AA]
- ld a2, OFF_FP_METHOD(rFP)
- move a3, rSELF
- PREFETCH_INST 2 # Get next inst, but don't advance rPC
- jal MterpSPutI16
- bnezc v0, MterpException # 0 on success
- ADVANCE 2 # Past exception point - now advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: mips64/op_invoke_virtual.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtual
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: mips64/op_invoke_super.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeSuper
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: mips64/op_invoke_direct.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeDirect
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: mips64/op_invoke_static.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeStatic
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: mips64/op_invoke_interface.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeInterface
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
- /*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: mips64/op_return_void_no_barrier.S */
- .extern MterpSuspendCheck
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- move a0, rSELF
- and ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- beqzc ra, 1f
- jal MterpSuspendCheck # (self)
-1:
- li a0, 0
- b MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: mips64/op_invoke_virtual_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: mips64/op_invoke_super_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeSuperRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: mips64/op_invoke_direct_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeDirectRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: mips64/op_invoke_static_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeStaticRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: mips64/op_invoke_interface_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeInterfaceRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: mips64/op_unused_79.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: mips64/op_unused_7a.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: mips64/op_neg_int.S */
-/* File: mips64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- subu a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: mips64/op_not_int.S */
-/* File: mips64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- nor a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: mips64/op_neg_long.S */
-/* File: mips64/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * For: not-long, neg-long
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- dsubu a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: mips64/op_not_long.S */
-/* File: mips64/unopWide.S */
- /*
- * Generic 64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * For: not-long, neg-long
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- nor a0, zero, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: mips64/op_neg_float.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- neg.s f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: mips64/op_neg_double.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- neg.d f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: mips64/op_int_to_long.S */
- /* int-to-long vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits)
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- vB
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: mips64/op_int_to_float.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.w f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: mips64/op_int_to_double.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.w f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: mips64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: mips64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_VREG a0, a3 # a0 <- vB
- GET_INST_OPCODE v0 # extract opcode from rINST
- .if 0
- SET_VREG_OBJECT a0, a2 # vA <- vB
- .else
- SET_VREG a0, a2 # vA <- vB
- .endif
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: mips64/op_long_to_float.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.l f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: mips64/op_long_to_double.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.l f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: mips64/op_float_to_int.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.w.s f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: mips64/op_float_to_long.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.l.s f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: mips64/op_float_to_double.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_FLOAT f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.d.s f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: mips64/op_double_to_int.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.w.d f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: mips64/op_double_to_long.S */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- trunc.l.d f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: mips64/op_double_to_float.S */
- /*
- * Conversion from or to floating-point happens in a floating-point register.
- * Therefore we load the input and store the output into or from a
- * floating-point register irrespective of the type.
- */
-/* File: mips64/fcvtHeader.S */
- /*
- * Loads a specified register from vB. Used primarily for conversions
- * from or to a floating-point type.
- *
-     * Sets up a1 = A and a2 = B. a1 is later used by fcvtFooter.S to
- * store the result in vA and jump to the next instruction.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- */
- ext a1, rINST, 8, 4 # a1 <- A
- srl a2, rINST, 12 # a2 <- B
- GET_VREG_DOUBLE f0, a2
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
-
- cvt.s.d f0, f0
-/* File: mips64/fcvtFooter.S */
- /*
- * Stores a specified register containing the result of conversion
- * from or to a floating-point type and jumps to the next instruction.
- *
- * Expects a1 to contain the destination Dalvik register number.
- * a1 is set up by fcvtHeader.S.
- *
- * For: int-to-float, int-to-double, long-to-float, long-to-double,
- * float-to-int, float-to-long, float-to-double, double-to-int,
- * double-to-long, double-to-float, neg-float, neg-double.
- *
- * Note that this file can't be included after a break in other files
- * and in those files its contents appear as a copy.
- * See: float-to-int, float-to-long, double-to-int, double-to-long.
- */
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a1
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: mips64/op_int_to_byte.S */
-/* File: mips64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- seb a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: mips64/op_int_to_char.S */
-/* File: mips64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- and a0, a0, 0xffff # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: mips64/op_int_to_short.S */
-/* File: mips64/unop.S */
- /*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "a0 = op a0".
- *
- * for: int-to-byte, int-to-char, int-to-short,
- * not-int, neg-int
- */
- /* unop vA, vB */
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- ext a2, rINST, 8, 4 # a2 <- A
- # optional op
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- seh a0, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: mips64/op_add_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: mips64/op_sub_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: mips64/op_mul_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: mips64/op_div_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: mips64/op_rem_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: mips64/op_and_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: mips64/op_or_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: mips64/op_xor_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: mips64/op_shl_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: mips64/op_shr_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: mips64/op_ushr_int.S */
-/* File: mips64/binop.S */
- /*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG a0, a2 # a0 <- vBB
- GET_VREG a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: mips64/op_add_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- daddu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: mips64/op_sub_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: mips64/op_mul_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dmul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: mips64/op_div_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: mips64/op_rem_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dmod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: mips64/op_and_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: mips64/op_or_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: mips64/op_xor_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: mips64/op_shl_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: mips64/op_shr_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: mips64/op_ushr_long.S */
-/* File: mips64/binopWide.S */
- /*
- * Generic 64-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vCC (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
- * xor-long, shl-long, shr-long, ushr-long
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_WIDE a0, a2 # a0 <- vBB
- GET_VREG_WIDE a1, a3 # a1 <- vCC
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a4 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: mips64/op_add_float.S */
-/* File: mips64/fbinop.S */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- add.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: mips64/op_sub_float.S */
-/* File: mips64/fbinop.S */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- sub.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: mips64/op_mul_float.S */
-/* File: mips64/fbinop.S */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- mul.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: mips64/op_div_float.S */
-/* File: mips64/fbinop.S */
- /*:
- * Generic 32-bit floating-point operation.
- *
- * For: add-float, sub-float, mul-float, div-float.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f0, a2 # f0 <- vBB
- GET_VREG_FLOAT f1, a3 # f1 <- vCC
- div.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: mips64/op_rem_float.S */
- /* rem-float vAA, vBB, vCC */
- .extern fmodf
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_FLOAT f12, a2 # f12 <- vBB
- GET_VREG_FLOAT f13, a3 # f13 <- vCC
- jal fmodf # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: mips64/op_add_double.S */
-/* File: mips64/fbinopWide.S */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- add.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: mips64/op_sub_double.S */
-/* File: mips64/fbinopWide.S */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- sub.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: mips64/op_mul_double.S */
-/* File: mips64/fbinopWide.S */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- mul.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: mips64/op_div_double.S */
-/* File: mips64/fbinopWide.S */
- /*:
- * Generic 64-bit floating-point operation.
- *
- * For: add-double, sub-double, mul-double, div-double.
- * form: <op> f0, f0, f1
- */
- /* binop vAA, vBB, vCC */
- srl a4, rINST, 8 # a4 <- AA
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f0, a2 # f0 <- vBB
- GET_VREG_DOUBLE f1, a3 # f1 <- vCC
- div.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: mips64/op_rem_double.S */
- /* rem-double vAA, vBB, vCC */
- .extern fmod
- lbu a2, 2(rPC) # a2 <- BB
- lbu a3, 3(rPC) # a3 <- CC
- GET_VREG_DOUBLE f12, a2 # f12 <- vBB
- GET_VREG_DOUBLE f13, a3 # f13 <- vCC
- jal fmod # f0 <- f12 op f13
- srl a4, rINST, 8 # a4 <- AA
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a4 # vAA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: mips64/op_add_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: mips64/op_sub_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- subu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: mips64/op_mul_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: mips64/op_div_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: mips64/op_rem_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: mips64/op_and_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: mips64/op_or_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: mips64/op_xor_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: mips64/op_shl_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: mips64/op_shr_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: mips64/op_ushr_int_2addr.S */
-/* File: mips64/binop2addr.S */
- /*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (INT_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a2 # a0 <- vA
- GET_VREG a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: mips64/op_add_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- daddu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: mips64/op_sub_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsubu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: mips64/op_mul_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dmul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: mips64/op_div_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- ddiv a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: mips64/op_rem_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dmod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: mips64/op_and_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: mips64/op_or_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: mips64/op_xor_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: mips64/op_shl_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: mips64/op_shr_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips64/op_ushr_long_2addr.S */
-/* File: mips64/binopWide2addr.S */
- /*
- * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be a MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * vB (a1). Useful for integer division and modulus. Note that we
- * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
- * correctly.
- *
- * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
- * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
- * shl-long/2addr, shr-long/2addr, ushr-long/2addr
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_WIDE a0, a2 # a0 <- vA
- GET_VREG_WIDE a1, a3 # a1 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- # optional op
- dsrl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_WIDE a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: mips64/op_add_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- add.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: mips64/op_sub_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- sub.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: mips64/op_mul_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- mul.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: mips64/op_div_float_2addr.S */
-/* File: mips64/fbinop2addr.S */
- /*:
- * Generic 32-bit "/2addr" floating-point operation.
- *
- * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f0, a2 # f0 <- vA
- GET_VREG_FLOAT f1, a3 # f1 <- vB
- div.s f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: mips64/op_rem_float_2addr.S */
- /* rem-float/2addr vA, vB */
- .extern fmodf
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_FLOAT f12, a2 # f12 <- vA
- GET_VREG_FLOAT f13, a3 # f13 <- vB
- jal fmodf # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_FLOAT f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: mips64/op_add_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- add.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: mips64/op_sub_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- sub.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: mips64/op_mul_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- mul.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: mips64/op_div_double_2addr.S */
-/* File: mips64/fbinopWide2addr.S */
- /*:
- * Generic 64-bit "/2addr" floating-point operation.
- *
- * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
- * form: <op> f0, f0, f1
- */
- /* binop/2addr vA, vB */
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f0, a2 # f0 <- vA
- GET_VREG_DOUBLE f1, a3 # f1 <- vB
- div.d f0, f0, f1 # f0 <- f0 op f1
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: mips64/op_rem_double_2addr.S */
- /* rem-double/2addr vA, vB */
- .extern fmod
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG_DOUBLE f12, a2 # f12 <- vA
- GET_VREG_DOUBLE f13, a3 # f13 <- vB
- jal fmod # f0 <- f12 op f13
- ext a2, rINST, 8, 4 # a2 <- A
- FETCH_ADVANCE_INST 1 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG_DOUBLE f0, a2 # vA <- f0
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: mips64/op_add_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: mips64/op_rsub_int.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: mips64/op_mul_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: mips64/op_div_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: mips64/op_rem_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: mips64/op_and_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: mips64/op_or_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: mips64/op_xor_int_lit16.S */
-/* File: mips64/binopLit16.S */
- /*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CCCC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
- * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- lh a1, 2(rPC) # a1 <- sign-extended CCCC
- ext a2, rINST, 8, 4 # a2 <- A
- ext a3, rINST, 12, 4 # a3 <- B
- GET_VREG a0, a3 # a0 <- vB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: mips64/op_add_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- addu a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips64/op_rsub_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- subu a0, a1, a0 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: mips64/op_mul_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mul a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: mips64/op_div_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- div a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: mips64/op_rem_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 1
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- mod a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: mips64/op_and_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- and a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: mips64/op_or_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- or a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: mips64/op_xor_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- xor a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: mips64/op_shl_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sll a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: mips64/op_shr_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- sra a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips64/op_ushr_int_lit8.S */
-/* File: mips64/binopLit8.S */
- /*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = a0 op a1".
- * This could be an MIPS instruction or a function call. (If the result
- * comes back in a register other than a0, you can override "result".)
- *
- * If "chkzero" is set to 1, we perform a divide-by-zero check on
- * CC (a1). Useful for integer division and modulus.
- *
- * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
- * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- lbu a3, 2(rPC) # a3 <- BB
- lb a1, 3(rPC) # a1 <- sign-extended CC
- srl a2, rINST, 8 # a2 <- AA
- GET_VREG a0, a3 # a0 <- vBB
- .if 0
- beqz a1, common_errDivideByZero # is second operand zero?
- .endif
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- # optional op
- srl a0, a0, a1 # a0 <- op, a0-a3 changed
- GET_INST_OPCODE v0 # extract opcode from rINST
- SET_VREG a0, a2 # vAA <- a0
- GOTO_OPCODE v0 # jump to next instruction
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: mips64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lw a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: mips64/op_iget_wide_quick.S */
- /* iget-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a4, 2(rPC) # a4 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- daddu a4, a3, a4 # create direct pointer
- lw a0, 0(a4)
- lw a1, 4(a4)
- dinsu a0, a1, 32, 32
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG_WIDE a0, a2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: mips64/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset//CCCC */
- .extern artIGetObjectFromMterp
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- EXPORT_PC
- GET_VREG_U a0, a2 # a0 <- object we're operating on
- jal artIGetObjectFromMterp # (obj, offset)
- ld a3, THREAD_EXCEPTION_OFFSET(rSELF)
- ext a2, rINST, 8, 4 # a2 <- A
- PREFETCH_INST 2
- bnez a3, MterpPossibleException # bail out
- SET_VREG_OBJECT v0, a2 # fp[A] <- v0
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: mips64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sw a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: mips64/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a3, 2(rPC) # a3 <- field byte offset
- GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer
- ext a0, rINST, 8, 4 # a0 <- A
- beqz a2, common_errNullObject # object was null
- GET_VREG_WIDE a0, a0 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a2, a3 # create a direct pointer
- sw a0, 0(a1)
- dsrl32 a0, a0, 0
- sw a0, 4(a1)
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: mips64/op_iput_object_quick.S */
- .extern MterpIputObjectQuick
- EXPORT_PC
- daddu a0, rFP, OFF_FP_SHADOWFRAME
- move a1, rPC
- move a2, rINST
- jal MterpIputObjectQuick
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips64/op_invoke_virtual_quick.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualQuick
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips64/op_invoke_virtual_range_quick.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeVirtualQuickRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: mips64/op_iput_boolean_quick.S */
-/* File: mips64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sb a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: mips64/op_iput_byte_quick.S */
-/* File: mips64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sb a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: mips64/op_iput_char_quick.S */
-/* File: mips64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sh a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: mips64/op_iput_short_quick.S */
-/* File: mips64/op_iput_quick.S */
- /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer
- ext a2, rINST, 8, 4 # a2 <- A
- beqz a3, common_errNullObject # object was null
- GET_VREG a0, a2 # a0 <- fp[A]
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- daddu a1, a1, a3
- sh a0, 0(a1) # obj.field <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: mips64/op_iget_boolean_quick.S */
-/* File: mips64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lbu a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: mips64/op_iget_byte_quick.S */
-/* File: mips64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lb a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: mips64/op_iget_char_quick.S */
-/* File: mips64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lhu a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: mips64/op_iget_short_quick.S */
-/* File: mips64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset//CCCC */
- srl a2, rINST, 12 # a2 <- B
- lhu a1, 2(rPC) # a1 <- field byte offset
- GET_VREG_U a3, a2 # a3 <- object we're operating on
- ext a4, rINST, 8, 4 # a4 <- A
- daddu a1, a1, a3
- beqz a3, common_errNullObject # object was null
- lh a0, 0(a1) # a0 <- obj.field
- FETCH_ADVANCE_INST 2 # advance rPC, load rINST
- SET_VREG a0, a4 # fp[A] <- a0
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: mips64/op_unused_f3.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: mips64/op_unused_f4.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: mips64/op_unused_f5.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: mips64/op_unused_f6.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: mips64/op_unused_f7.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: mips64/op_unused_f8.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: mips64/op_unused_f9.S */
-/* File: mips64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- b MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: mips64/op_invoke_polymorphic.S */
-/* File: mips64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokePolymorphic
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 4
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips64/op_invoke_polymorphic_range.S */
-/* File: mips64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokePolymorphicRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 4
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: mips64/op_invoke_custom.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeCustom
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: mips64/op_invoke_custom_range.S */
-/* File: mips64/invoke.S */
- /*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- .extern MterpShouldSwitchInterpreters
- EXPORT_PC
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- move a3, rINST
- jal MterpInvokeCustomRange
- beqzc v0, MterpException
- FETCH_ADVANCE_INST 3
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: mips64/op_const_method_handle.S */
-/* File: mips64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstMethodHandle # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: mips64/op_const_method_type.S */
-/* File: mips64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- lhu a0, 2(rPC) # a0 <- BBBB
- srl a1, rINST, 8 # a1 <- AA
- daddu a2, rFP, OFF_FP_SHADOWFRAME
- move a3, rSELF
- jal MterpConstMethodType # (index, tgt_reg, shadow_frame, self)
- PREFETCH_INST 2 # load rINST
- bnez v0, MterpPossibleException # let reference interpreter deal with it.
- ADVANCE 2 # advance rPC
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-
- .balign 128
-/* File: mips64/instruction_end.S */
-
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: mips64/instruction_start_sister.S */
-
- .global artMterpAsmSisterStart
- .text
- .balign 4
-artMterpAsmSisterStart:
-
-/* File: mips64/instruction_end_sister.S */
-
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
-
-/* File: mips64/instruction_start_alt.S */
-
- .global artMterpAsmAltInstructionStart
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (0 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (1 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (2 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (3 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (4 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (5 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (6 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (7 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (8 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (9 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (10 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (11 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (12 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (13 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (14 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (15 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (16 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (17 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (18 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (19 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (20 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (21 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (22 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (23 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (24 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (25 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (26 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (27 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (28 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (29 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (30 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (31 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (32 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (33 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (34 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (35 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (36 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (37 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (38 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (39 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (40 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (41 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (42 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (43 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (44 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (45 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (46 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (47 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (48 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (49 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (50 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (51 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (52 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (53 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (54 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (55 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (56 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (57 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (58 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (59 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (60 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (61 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (62 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (63 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (64 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (65 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (66 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (67 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (68 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (69 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (70 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (71 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (72 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (73 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (74 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (75 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (76 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (77 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (78 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (79 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (80 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (81 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (82 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (83 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (84 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (85 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (86 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (87 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (88 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (89 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (90 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (91 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (92 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (93 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (94 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (95 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (96 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (97 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (98 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (99 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (100 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (101 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (102 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (103 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (104 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (105 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (106 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (107 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (108 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (109 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (110 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (111 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (112 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (113 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (114 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (115 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (116 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (117 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (118 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (119 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (120 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (121 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (122 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (123 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (124 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (125 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (126 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (127 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (128 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (129 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (130 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (131 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (132 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (133 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (134 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (135 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (136 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (137 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (138 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (139 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (140 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (141 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (142 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (143 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (144 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (145 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (146 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (147 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (148 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (149 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (150 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (151 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (152 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (153 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (154 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (155 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (156 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (157 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (158 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (159 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (160 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (161 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (162 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (163 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (164 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (165 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (166 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (167 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (168 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (169 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (170 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (171 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (172 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (173 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (174 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (175 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (176 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (177 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (178 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (179 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (180 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (181 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (182 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (183 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (184 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (185 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (186 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (187 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (188 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (189 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (190 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (191 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (192 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (193 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (194 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (195 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (196 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (197 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (198 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (199 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (200 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (201 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (202 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (203 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (204 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (205 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (206 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (207 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (208 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (209 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (210 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (211 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (212 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (213 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (214 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (215 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (216 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (217 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (218 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (219 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (220 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (221 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (222 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (223 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (224 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (225 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (226 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (227 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (228 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (229 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (230 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (231 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (232 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (233 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (234 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (235 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (236 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (237 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (238 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (239 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (240 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (241 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (242 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (243 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (244 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (245 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (246 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (247 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (248 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (249 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (250 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (251 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (252 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (253 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (254 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: mips64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Note that the call to MterpCheckBefore is done as a tail call.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- dla ra, artMterpAsmInstructionStart
- dla t9, MterpCheckBefore
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rPC
- daddu ra, ra, (255 * 128) # Addr of primary handler.
- jalr zero, t9 # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
- .balign 128
-/* File: mips64/instruction_end_alt.S */
-
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
-
-/* File: mips64/footer.S */
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
- .extern MterpLogDivideByZeroException
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogDivideByZeroException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogArrayIndexException
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogArrayIndexException
-#endif
- b MterpCommonFallback
-
- .extern MterpLogNullObjectException
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogNullObjectException
-#endif
- b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- ld a0, THREAD_EXCEPTION_OFFSET(rSELF)
- beqzc a0, MterpFallback # If not, fall back to reference interpreter.
- /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
- .extern MterpHandleException
- .extern MterpShouldSwitchInterpreters
-MterpException:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpHandleException # (self, shadow_frame)
- beqzc v0, MterpExceptionReturn # no local catch, back to caller.
- ld a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
- lwu a1, OFF_FP_DEX_PC(rFP)
- REFRESH_IBASE
- dlsa rPC, a1, a0, 1 # generate new dex_pc_ptr
- /* Do we need to switch interpreters? */
- jal MterpShouldSwitchInterpreters
- bnezc v0, MterpFallback
- /* resume execution at catch block */
- EXPORT_PC
- FETCH_INST
- GET_INST_OPCODE v0
- GOTO_OPCODE v0
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
- bgtzc rINST, .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
- li v0, JIT_CHECK_OSR
- beqc rPROFILE, v0, .L_osr_check
- bltc rPROFILE, v0, .L_resume_backward_branch
- dsubu rPROFILE, 1
- beqzc rPROFILE, .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- lw ra, THREAD_FLAGS_OFFSET(rSELF)
- REFRESH_IBASE
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- and ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
- bnezc ra, .L_suspend_request_pending
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_suspend_request_pending:
- EXPORT_PC
- move a0, rSELF
- jal MterpSuspendCheck # (self)
- bnezc v0, MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_no_count_backwards:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- bnec rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_backward_branch
-
-.L_forward_branch:
- li v0, JIT_CHECK_OSR # check for possible OSR re-entry
- beqc rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
- daddu a2, rINST, rINST # a2<- byte offset
- FETCH_ADVANCE_INST_RB a2 # update rPC, load rINST
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-.L_check_osr_forward:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- b .L_resume_forward_branch
-
-.L_add_batch:
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- ld a0, OFF_FP_METHOD(rFP)
- move a2, rSELF
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move rPROFILE, v0 # restore new hotness countdown to rPROFILE
- b .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- li a2, 2
- EXPORT_PC
- jal MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
- bnezc v0, MterpOnStackReplacement
- FETCH_ADVANCE_INST 2
- GET_INST_OPCODE v0 # extract opcode from rINST
- GOTO_OPCODE v0 # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rINST # rINST contains offset
- jal MterpLogOSR
-#endif
- li v0, 1 # Signal normal return
- b MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
- .extern MterpLogFallback
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- move a0, rSELF
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- jal MterpLogFallback
-#endif
-MterpCommonFallback:
- li v0, 0 # signal retry with reference interpreter.
- b MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA. Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- li v0, 1 # signal return to caller.
- b MterpDone
-/*
- * The returned value is expected in a0. If it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended,
- * depending on the return type.
- */
-MterpReturn:
- ld a2, OFF_FP_RESULT_REGISTER(rFP)
- sd a0, 0(a2)
- li v0, 1 # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- blez rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
- move rINST, v0 # stash return value
- /* Report cached hotness counts */
- ld a0, OFF_FP_METHOD(rFP)
- daddu a1, rFP, OFF_FP_SHADOWFRAME
- move a2, rSELF
- sh rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
- jal MterpAddHotnessBatch # (method, shadow_frame, self)
- move v0, rINST # restore return value
-
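
A hedged C sketch of the decision this epilogue makes (variable and parameter types are simplified placeholders; only MterpAddHotnessBatch is the real helper called above, declared here with illustrative types):

    #include <stdint.h>

    /* Simplified stand-in for the real helper (actual types: ArtMethod*, ShadowFrame*, Thread*). */
    extern int16_t MterpAddHotnessBatch(void* method, void* shadow_frame, void* self);

    static int16_t report_cached_hotness(int16_t profile, int16_t* frame_countdown,
                                         void* method, void* shadow_frame, void* self) {
        if (profile <= 0) {
            /* Hotness disabled or an OSR check pending: nothing to report. */
            return profile;
        }
        *frame_countdown = profile;   /* write the cached countdown back into the frame */
        return MterpAddHotnessBatch(method, shadow_frame, self);
    }
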
-.L_pop_and_return:
- ld s6, STACK_OFFSET_S6(sp)
- .cfi_restore 22
- ld s5, STACK_OFFSET_S5(sp)
- .cfi_restore 21
- ld s4, STACK_OFFSET_S4(sp)
- .cfi_restore 20
- ld s3, STACK_OFFSET_S3(sp)
- .cfi_restore 19
- ld s2, STACK_OFFSET_S2(sp)
- .cfi_restore 18
- ld s1, STACK_OFFSET_S1(sp)
- .cfi_restore 17
- ld s0, STACK_OFFSET_S0(sp)
- .cfi_restore 16
-
- ld ra, STACK_OFFSET_RA(sp)
- .cfi_restore 31
-
- ld t8, STACK_OFFSET_GP(sp)
- .cpreturn
- .cfi_restore 28
-
- .set noreorder
- jr ra
- daddu sp, sp, STACK_SIZE
- .cfi_adjust_cfa_offset -STACK_SIZE
-
- .cfi_endproc
- .set reorder
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
deleted file mode 100644
index 3f70919..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ /dev/null
@@ -1,12993 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'x86'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: x86/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing an ExecuteXXXImpl()-style body (it doesn't
-  handle invoke and allows higher-level code to create the frame & shadow frame).
-
-  Once that's working, support direct entry code & eliminate the shadow frame (and
-  the excess locals allocation).
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
- eax, edx, ecx, st(0)-st(7)
-Callee save set:
- ebx, esi, edi, ebp
-Return regs:
- 32-bit in eax
- 64-bit in edx:eax (low-order 32 in eax)
- fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left. On entry to target, first
-parm is at 4(%esp). Traditional entry code is:
-
-functEntry:
-    push    %ebp             # save old frame pointer
-    mov     %esp,%ebp        # establish new frame pointer
-    sub     FrameSize,%esp   # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
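
For readers less familiar with the cdecl convention summarized above, a minimal C sketch of the argument-addressing rule (illustrative only; `argno` is zero-based):

    /* After the traditional prologue, %ebp points at the saved old %ebp and   */
    /* 4(%ebp) holds the return address, so argument n lives at (n + 2) * 4.   */
    static inline int arg_offset_from_ebp(int argno) {
        return (argno + 2) * 4;   /* arg 0 -> 8(%ebp), arg 1 -> 12(%ebp), ... */
    }
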
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPC esi interpreted program counter, used for fetching instructions
- rFP edi interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE edx base of instruction handler table
- rREFS ebp base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle Mac compiler specifics.
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushl \_reg
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popl \_reg
- .cfi_adjust_cfa_offset -4
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
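
A hedged C analogue of the OFF_FP() trick above (the accessor and its names are illustrative, not runtime helpers; it assumes the SHADOWFRAME_* constants from asm_support.h are visible):

    #include <stdint.h>
    #include <string.h>

    /* rFP points at the vregs array inside the ShadowFrame, not at the frame     */
    /* itself, so every other field is reached by stepping backwards from rFP.    */
    #define OFF_FP_C(field_offset) ((field_offset) - SHADOWFRAME_VREGS_OFFSET)

    static inline uint32_t read_dex_pc(const char* fp /* plays the role of rFP */) {
        uint32_t dex_pc;
        memcpy(&dex_pc, fp + OFF_FP_C(SHADOWFRAME_DEX_PC_OFFSET), sizeof(dex_pc));
        return dex_pc;
    }
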
-
-/* Frame size must be 16-byte aligned.
- * Remember to account for 4 bytes for the return address + 4 * 4 bytes for spills.
- */
-#define FRAME_SIZE 28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16 + 16)
-#define IN_ARG2 (FRAME_SIZE + 16 + 12)
-#define IN_ARG1 (FRAME_SIZE + 16 + 8)
-#define IN_ARG0 (FRAME_SIZE + 16 + 4)
-/* Spill offsets relative to %esp */
-#define LOCAL0 (FRAME_SIZE - 4)
-#define LOCAL1 (FRAME_SIZE - 8)
-#define LOCAL2 (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3 ( 12)
-#define OUT_ARG2 ( 8)
-#define OUT_ARG1 ( 4)
-#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
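
For reference, the alignment arithmetic behind FRAME_SIZE works out as: 4 bytes of return address + 4 * 4 bytes of callee-save spills + 28 bytes of frame = 48 bytes, a multiple of 16 as required for SSE in native code. A compile-time sketch of that check (illustrative; constants restated locally):

    #define RETURN_ADDR_BYTES  4
    #define CALLEE_SAVE_BYTES  (4 * 4)   /* ebp, edi, esi, ebx */
    #define MTERP_FRAME_BYTES  28        /* FRAME_SIZE above */
    _Static_assert((RETURN_ADDR_BYTES + CALLEE_SAVE_BYTES + MTERP_FRAME_BYTES) % 16 == 0,
                   "mterp x86 frame must preserve 16-byte stack alignment");
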
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF IN_ARG0(%esp)
-#define rPC %esi
-#define CFI_DEX 6 // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP 0 // DWARF register number of the first argument register (eax).
-#define rFP %edi
-#define rINST %ebx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %edx
-#define rREFS %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
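
A hedged C sketch of the pointer-versus-offset distinction described above (the helper name is illustrative; the runtime's actual conversion lives elsewhere):

    #include <stdint.h>

    /* EXPORT_PC stores a direct pointer to the current instruction; the rest    */
    /* of the runtime wants a code-unit offset, recovered on demand.             */
    static inline uint32_t dex_pc_from_exported_ptr(const uint16_t* exported_pc,
                                                    const uint16_t* insns_begin) {
        return (uint32_t)(exported_pc - insns_begin);  /* offset in 16-bit code units */
    }
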
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase is kept in a caller-save register, so we must restore it after each call.
- * It is also clobbered by some 64-bit operations (such as imul), so we should
- * restore it in those cases as well.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
- movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * On entry to a handler, rINST does not contain the opcode number.
- * However, some utilities require the full instruction value, so this
- * macro restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwl (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(7), %eax
- addl rIBASE, %eax
- jmp *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leal 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
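
The dispatch math in GOTO_NEXT has a simple C analogue, sketched below (handler_base stands in for rIBASE; the 128-byte stride matches the `shll MACRO_LITERAL(7)` above and the `.balign 128` padding of each handler later in this file):

    #include <stdint.h>

    typedef void (*mterp_handler_t)(void);

    static inline mterp_handler_t handler_for_opcode(uintptr_t handler_base,
                                                     uint8_t opcode) {
        /* opcode * 128 == opcode << 7, matching the shll in GOTO_NEXT. */
        return (mterp_handler_t)(handler_base + ((uintptr_t)opcode << 7));
    }
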
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- pxor \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
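
A rough C model of the paired arrays these vreg macros maintain (illustrative; `vregs` corresponds to rFP and `refs` to rREFS):

    #include <stdint.h>

    /* Non-reference writes clear the shadow ref slot; reference writes mirror   */
    /* the value into it so the GC can locate live object pointers.              */
    static void set_vreg(uint32_t* vregs, uint32_t* refs, int n, uint32_t value) {
        vregs[n] = value;
        refs[n] = 0;
    }

    static void set_vreg_object(uint32_t* vregs, uint32_t* refs, int n, uint32_t ref) {
        vregs[n] = ref;
        refs[n] = ref;
    }
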
-
-/* File: x86/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
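
In C terms the entry point has roughly this shape, sketched from the argument list above (parameter types are simplified placeholders; the authoritative declaration lives in the runtime headers):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true on a normal return and false when bailing out to the         */
    /* reference interpreter (see MterpFallback / MterpDone below).              */
    bool ExecuteMterpImpl(void* self,                       /* Thread*      */
                          const uint16_t* dex_instructions,
                          void* shadow_frame,               /* ShadowFrame* */
                          void* result_register);           /* JValue*      */
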
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa esp, 4
-
- /* Spill callee save regs */
- PUSH %ebp
- PUSH %edi
- PUSH %esi
- PUSH %ebx
-
- /* Allocate frame */
- subl $FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Load ShadowFrame pointer */
- movl IN_ARG2(%esp), %edx
-
- /* Remember the return register */
- movl IN_ARG3(%esp), %eax
- movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
- /* Remember the code_item */
- movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
- leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
- leal (rFP, %eax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea (%ecx, %eax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Set up for backwards branches & osr profiling */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpSetUpHotnessCountdown)
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/* File: x86/instruction_start.S */
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: x86/op_nop.S */
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: x86/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: x86/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: x86/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: x86/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: x86/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: x86/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 4(rPC), %ecx # ecx<- BBBB
- movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: x86/op_move_object.S */
-/* File: x86/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: x86/op_move_object_from16.S */
-/* File: x86/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: x86/op_move_object_16.S */
-/* File: x86/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if 1
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: x86/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: x86/op_move_result_wide.S */
- /* move-result-wide vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl 4(%eax), %ecx # Get high
- movl (%eax), %eax # Get low
- SET_VREG %eax, rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: x86/op_move_result_object.S */
-/* File: x86/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if 1
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: x86/op_move_exception.S */
- /* move-exception vAA */
- movl rSELF, %ecx
- movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
- movl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: x86/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: x86/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: x86/op_return_wide.S */
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: x86/op_return_object.S */
-/* File: x86/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: x86/op_const_4.S */
- /* const/4 vA, #+B */
- movsx rINSTbl, %eax # eax <-ssssssBx
- movl $0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $4, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: x86/op_const_16.S */
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINST # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: x86/op_const.S */
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINST # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: x86/op_const_high16.S */
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINST # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: x86/op_const_wide_16.S */
- /* const-wide/16 vAA, #+BBBB */
- movswl 2(rPC), %eax # eax <- ssssBBBB
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: x86/op_const_wide_32.S */
- /* const-wide/32 vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # eax <- BBBBbbbb
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: x86/op_const_wide.S */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movl 2(rPC), %eax # eax <- lsw
- movzbl rINSTbl, %ecx # ecx <- AA
- movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax, %ecx
- SET_VREG_HIGH rINST, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: x86/op_const_wide_high16.S */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- xorl %eax, %eax
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: x86/op_const_string.S */
-/* File: x86/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@BBBB */
- .extern MterpConstString
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: x86/op_const_string_jumbo.S */
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: x86/op_const_class.S */
-/* File: x86/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: x86/op_monitor_enter.S */
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artLockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: x86/op_monitor_exit.S */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: x86/op_check_cast.S */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- leal VREG_ADDRESS(rINST), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: x86/op_instance_of.S */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * The most common situation is a non-null object being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, %eax # eax <- BA
- sarl $4, %eax # eax <- B
- leal VREG_ADDRESS(%eax), %ecx # Get object address
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- andb $0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: x86/op_array_length.S */
-/*
- * Return the length of an array.
- */
- mov rINST, %eax # eax <- BA
- sarl $4, rINST # rINST <- B
- GET_VREG %ecx, rINST # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST, %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: x86/op_new_instance.S */
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG1(%esp)
- REFRESH_INST 34
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpNewInstance)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: x86/op_new_array.S */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 35
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpNewArray)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: x86/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL(MterpFilledNewArray)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: x86/op_filled_new_array_range.S */
-/* File: x86/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL(MterpFilledNewArrayRange)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: x86/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax, rINST # eax <- vAA (array object)
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: x86/op_throw.S */
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINST # eax<- vAA (exception object)
- testl %eax, %eax
- jz common_errNullObject
- movl rSELF,%ecx
- movl %eax, THREAD_EXCEPTION_OFFSET(%ecx)
- jmp MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: x86/op_goto.S */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbl rINSTbl, rINST # rINST <- ssssssAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: x86/op_goto_16.S */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswl 2(rPC), rINST # rINST <- ssssAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: x86/op_goto_32.S */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movl 2(rPC), rINST # rINST <- AAAAAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: x86/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL(MterpDoPackedSwitch)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: x86/op_sparse_switch.S */
-/* File: x86/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL(MterpDoSparseSwitch)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: x86/op_cmpl_float.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movss VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpl_float_nan_is_neg
- je .Lop_cmpl_float_finish
- jb .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
- incl %eax
- jmp .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
- decl %eax
-.Lop_cmpl_float_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
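The cmpl and cmpg variants differ only in how an unordered (NaN) comparison is biased, which is what the `jp` targets above and below encode. A C sketch of both behaviours (illustrative):

    #include <math.h>

    /* cmpl-float biases NaN towards -1; cmpg-float biases NaN towards +1.       */
    /* Ordered operands behave identically in both variants.                     */
    static int compare_float(float x, float y, int nan_result) {
        if (isnan(x) || isnan(y)) return nan_result;  /* -1 for cmpl, +1 for cmpg */
        if (x == y) return 0;
        return (x < y) ? -1 : 1;
    }
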
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: x86/op_cmpg_float.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movss VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpg_float_nan_is_pos
- je .Lop_cmpg_float_finish
- jb .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
- incl %eax
- jmp .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
- decl %eax
-.Lop_cmpg_float_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: x86/op_cmpl_double.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movsd VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpl_double_nan_is_neg
- je .Lop_cmpl_double_finish
- jb .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
- incl %eax
- jmp .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
- decl %eax
-.Lop_cmpl_double_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: x86/op_cmpg_double.S */
-/* File: x86/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movsd VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%ecx), %xmm0
- jp .Lop_cmpg_double_nan_is_pos
- je .Lop_cmpg_double_finish
- jb .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
- incl %eax
- jmp .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
- decl %eax
-.Lop_cmpg_double_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: x86/op_cmp_long.S */
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
- cmpl VREG_HIGH_ADDRESS(%ecx), %eax
- jl .Lop_cmp_long_smaller
- jg .Lop_cmp_long_bigger
- movzbl 2(rPC), %eax # eax <- BB, restore BB
- GET_VREG %eax, %eax # eax <- v[BB]
- sub VREG_ADDRESS(%ecx), %eax
- ja .Lop_cmp_long_bigger
- jb .Lop_cmp_long_smaller
-.Lop_cmp_long_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_cmp_long_bigger:
- movl $1, %eax
- jmp .Lop_cmp_long_finish
-
-.Lop_cmp_long_smaller:
- movl $-1, %eax
- jmp .Lop_cmp_long_finish
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: x86/op_if_eq.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jne 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: x86/op_if_ne.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- je 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: x86/op_if_lt.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jge 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: x86/op_if_ge.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jl 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: x86/op_if_gt.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jle 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: x86/op_if_le.S */
-/* File: x86/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- jg 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: x86/op_if_eqz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jne 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: x86/op_if_nez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- je 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: x86/op_if_ltz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jge 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: x86/op_if_gez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jl 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: x86/op_if_gtz.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jle 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: x86/op_if_lez.S */
-/* File: x86/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINST) # compare (vA, 0)
- jg 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: x86/op_unused_3e.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: x86/op_unused_3f.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: x86/op_unused_40.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: x86/op_unused_41.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: x86/op_unused_42.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: x86/op_unused_43.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movl MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: x86/op_aget_wide.S */
-/*
- * Array get, 64 bits. vAA <- vBB[vCC].
- */
- /* aget-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: x86/op_aget_object.S */
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
-    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: x86/op_aget_boolean.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: x86/op_aget_byte.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: x86/op_aget_char.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: x86/op_aget_short.S */
-/* File: x86/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- GET_VREG rINST, rINST
- movl rINST, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: x86/op_aput_wide.S */
-/*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
- movq %xmm0, (%eax) # vBB[vCC] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: x86/op_aput_object.S */
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 77
- movl rINST, OUT_ARG2(%esp)
-    call    SYMBOL(MterpAputObject)         # (shadow_frame, pc, inst_data)
- RESTORE_IBASE
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: x86/op_aput_boolean.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST, rINST
- movb rINSTbl, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: x86/op_aput_byte.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST, rINST
- movb rINSTbl, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: x86/op_aput_char.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST, rINST
- movw rINSTw, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: x86/op_aput_short.S */
-/* File: x86/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST, rINST
- movw rINSTw, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetU32)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
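The iget family above decodes its operands from the BA byte held in rINSTbl: the object register B is the high nibble, the destination A is the low nibble, and the field index CCCC is the next code unit. A small C sketch of that decoding, with hypothetical names for the helper and its outputs:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 22c-format instruction such as iget vA, vB, field@CCCC.
     * insns[0] is B|A|op (opcode in the low byte), insns[1] is CCCC. */
    static void decode_22c(const uint16_t *insns,
                           uint32_t *vA, uint32_t *vB, uint32_t *field_idx) {
        uint32_t ba = insns[0] >> 8;   /* the BA byte the handlers keep in rINSTbl */
        *vA = ba & 0x0f;               /* andb $0xf, rINSTbl  -> A (destination)   */
        *vB = ba >> 4;                 /* sarl $4             -> B (object vreg)   */
        *field_idx = insns[1];         /* movzwl 2(rPC)       -> CCCC              */
    }

    int main(void) {
        const uint16_t insns[2] = { 0x5352, 0x0012 };  /* iget v3, v5, field@0x0012 */
        uint32_t a, b, field;
        decode_22c(insns, &a, &b, &field);
        printf("vA=%" PRIu32 " vB=%" PRIu32 " field@0x%04" PRIx32 "\n", a, b, field);
        return 0;
    }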
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: x86/op_iget_wide.S */
-/*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetU64)
- mov rSELF, %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- SET_VREG %eax, rINST
- SET_VREG_HIGH %edx, rINST
- RESTORE_IBASE_FROM_SELF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: x86/op_iget_object.S */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetObj)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: x86/op_iget_boolean.S */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetU8)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: x86/op_iget_byte.S */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetI8)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: x86/op_iget_char.S */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetU16)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: x86/op_iget_short.S */
-/* File: x86/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetI16)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: x86/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU32
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutU32)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: x86/op_iput_wide.S */
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl,%ecx # ecx <- BA
- sarl $4,%ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf,rINSTbl # rINST <- A
- leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: x86/op_iput_object.S */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 91
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: x86/op_iput_boolean.S */
-/* File: x86/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU8
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutU8)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: x86/op_iput_byte.S */
-/* File: x86/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI8
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutI8)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: x86/op_iput_char.S */
-/* File: x86/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU16
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutU16)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: x86/op_iput_short.S */
-/* File: x86/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI16
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutI16)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU32
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetU32)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: x86/op_sget_wide.S */
-/*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field@BBBB */
- .extern MterpSGetU64
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetU64)
- movl rSELF, %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG %eax, rINST # fp[A]<- low part
- SET_VREG_HIGH %edx, rINST # fp[A+1]<- high part
- RESTORE_IBASE_FROM_SELF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: x86/op_sget_object.S */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetObj
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetObj)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 1
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: x86/op_sget_boolean.S */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU8
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetU8)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: x86/op_sget_byte.S */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI8
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetI8)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: x86/op_sget_char.S */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU16
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetU16)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: x86/op_sget_short.S */
-/* File: x86/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI16
- EXPORT_PC
- movzwl 2(rPC), %eax
-    movl    %eax, OUT_ARG0(%esp)            # field ref BBBB
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetI16)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: x86/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU32
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutU32)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: x86/op_sput_wide.S */
-/*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG1(%esp) # &fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: x86/op_sput_object.S */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 105
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: x86/op_sput_boolean.S */
-/* File: x86/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU8
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutU8)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: x86/op_sput_byte.S */
-/* File: x86/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutI8
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutI8)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: x86/op_sput_char.S */
-/* File: x86/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU16
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutU16)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: x86/op_sput_short.S */
-/* File: x86/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutI16
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutI16)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: x86/op_invoke_virtual.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 110
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtual)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: x86/op_invoke_super.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 111
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeSuper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: x86/op_invoke_direct.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 112
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeDirect)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: x86/op_invoke_static.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 113
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeStatic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: x86/op_invoke_interface.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 114
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeInterface)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: x86/op_return_void_no_barrier.S */
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: x86/op_invoke_virtual_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 116
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: x86/op_invoke_super_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 117
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeSuperRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: x86/op_invoke_direct_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 118
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeDirectRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: x86/op_invoke_static_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 119
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeStaticRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: x86/op_invoke_interface_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 120
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeInterfaceRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: x86/op_unused_79.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: x86/op_unused_7a.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: x86/op_neg_int.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- negl %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: x86/op_not_int.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- notl %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: x86/op_neg_long.S */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- negl %eax
- adcl $0, %ecx
- negl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: x86/op_not_long.S */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- notl %eax
- notl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: x86/op_neg_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
- fchs
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: x86/op_neg_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
- fchs
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: x86/op_int_to_long.S */
- /* int to long vA, vB */
- movzbl rINSTbl, %eax # eax <- +A
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movl rIBASE, %ecx # cltd trashes rIBASE/edx
-    cltd                                    # rIBASE:eax <- sign-extended vB
- SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
- SET_VREG %eax, rINST # v[A+0] <- %eax
- movl %ecx, rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: x86/op_int_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: x86/op_int_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: x86/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: x86/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if 0
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: x86/op_long_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildll VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: x86/op_long_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fildll VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: x86/op_float_to_int.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 0
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 0
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 0
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_float_to_int_special_case # fix up result
-
-.Lop_float_to_int_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 0
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_int_special_case:
- fnstsw %ax
- sahf
- jp .Lop_float_to_int_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 0
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_int_finish
-.Lop_float_to_int_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 0
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_int_finish
-
-
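The cvtfp_int block above has to emulate Java's conversion rules on top of the x87 unit, hence the rounding-control fiddling and the special-case fixups. The rules themselves are simple, and apply equally to float-to-int, float-to-long, double-to-int and double-to-long; a minimal C sketch of the int case (not ART's actual helper):

    #include <math.h>
    #include <stdint.h>

    /* Java float-to-int: truncate toward zero, clamp to the int range, NaN -> 0. */
    int32_t java_float_to_int(float value) {
        if (isnan(value)) {
            return 0;
        }
        if (value >= 2147483648.0f) {      /* >= 2^31: clamp to maxint */
            return INT32_MAX;
        }
        if (value <= -2147483648.0f) {     /* <= -2^31: clamp to minint */
            return INT32_MIN;
        }
        return (int32_t)value;             /* C's cast already truncates toward zero */
    }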
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: x86/op_float_to_long.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 0
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 1
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 1
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_float_to_long_special_case # fix up result
-
-.Lop_float_to_long_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 1
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_float_to_long_special_case:
- fnstsw %ax
- sahf
- jp .Lop_float_to_long_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 1
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_long_finish
-.Lop_float_to_long_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 1
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_float_to_long_finish
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: x86/op_float_to_double.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstpl VREG_ADDRESS(%ecx) # vA <- %st0
- .if 1
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: x86/op_double_to_int.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 1
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 0
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 0
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_double_to_int_special_case # fix up result
-
-.Lop_double_to_int_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 0
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_int_special_case:
- fnstsw %ax
- sahf
- jp .Lop_double_to_int_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 0
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_int_finish
-.Lop_double_to_int_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 0
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_int_finish
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: x86/op_double_to_long.S */
-/* File: x86/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- .if 1
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $0xf, %cl # ecx <- A
- .if 1
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if 1
- movl $0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .Lop_double_to_long_special_case # fix up result
-
-.Lop_double_to_long_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if 1
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_double_to_long_special_case:
- fnstsw %ax
- sahf
- jp .Lop_double_to_long_isNaN
- adcl $-1, VREG_ADDRESS(%ecx)
- .if 1
- adcl $-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_long_finish
-.Lop_double_to_long_isNaN:
- movl $0, VREG_ADDRESS(%ecx)
- .if 1
- movl $0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .Lop_double_to_long_finish
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: x86/op_double_to_float.S */
-/* File: x86/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- andb $0xf, %cl # ecx <- A
-
- fstps VREG_ADDRESS(%ecx) # vA <- %st0
- .if 0
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: x86/op_int_to_byte.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movsbl %al, %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: x86/op_int_to_char.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movzwl %ax,%eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: x86/op_int_to_short.S */
-/* File: x86/unop.S */
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf,%cl # ecx <- A
- movswl %ax, %eax
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: x86/op_add_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- addl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: x86/op_sub_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- subl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: x86/op_mul_int.S */
- /*
- * 32-bit binary multiplication.
- */
- /* mul vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- mov rIBASE, LOCAL0(%esp)
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- mov LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: x86/op_div_int.S */
-/* File: x86/bindiv.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- mov rIBASE, LOCAL0(%esp)
- testl %ecx, %ecx
- je common_errDivideByZero
- movl %eax, %edx
- orl %ecx, %edx
- testl $0xFFFFFF00, %edx # If both arguments are less
- # than 8-bit and +ve
- jz .Lop_div_int_8 # Do 8-bit divide
- testl $0xFFFF0000, %edx # If both arguments are less
- # than 16-bit and +ve
- jz .Lop_div_int_16 # Do 16-bit divide
- cmpl $-1, %ecx
- jne .Lop_div_int_32
- cmpl $0x80000000, %eax
- jne .Lop_div_int_32
- movl $0x80000000, %eax
- jmp .Lop_div_int_finish
-.Lop_div_int_32:
- cltd
- idivl %ecx
- jmp .Lop_div_int_finish
-.Lop_div_int_8:
- div %cl # 8-bit divide otherwise.
- # Remainder in %ah, quotient in %al
- .if 0
- movl %eax, %edx
- shr $8, %edx
- .else
- andl $0x000000FF, %eax
- .endif
- jmp .Lop_div_int_finish
-.Lop_div_int_16:
- xorl %edx, %edx # Clear %edx before divide
- div %cx
-.Lop_div_int_finish:
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: x86/op_rem_int.S */
-/* File: x86/bindiv.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- mov rIBASE, LOCAL0(%esp)
- testl %ecx, %ecx
- je common_errDivideByZero
- movl %eax, %edx
- orl %ecx, %edx
- testl $0xFFFFFF00, %edx # If both arguments are less
- # than 8-bit and +ve
- jz .Lop_rem_int_8 # Do 8-bit divide
- testl $0xFFFF0000, %edx # If both arguments are less
- # than 16-bit and +ve
- jz .Lop_rem_int_16 # Do 16-bit divide
- cmpl $-1, %ecx
- jne .Lop_rem_int_32
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_32
- movl $0, rIBASE
- jmp .Lop_rem_int_finish
-.Lop_rem_int_32:
- cltd
- idivl %ecx
- jmp .Lop_rem_int_finish
-.Lop_rem_int_8:
- div %cl # 8-bit divide otherwise.
- # Remainder in %ah, quotient in %al
- .if 1
- movl %eax, %edx
- shr $8, %edx
- .else
- andl $0x000000FF, %eax
- .endif
- jmp .Lop_rem_int_finish
-.Lop_rem_int_16:
- xorl %edx, %edx # Clear %edx before divide
- div %cx
-.Lop_rem_int_finish:
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
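Both bindiv expansions above special-case vBB = minint with vCC = -1 before reaching idivl, because that quotient overflows and the instruction would fault; Java instead defines div as minint and rem as 0. A short C sketch of those semantics (division by zero is assumed to have been rejected already, as the common_errDivideByZero branch does):

    #include <stdint.h>

    /* Java div-int semantics; the divisor is assumed non-zero here. */
    int32_t java_div_int(int32_t dividend, int32_t divisor) {
        if (dividend == INT32_MIN && divisor == -1) {
            return INT32_MIN;   /* idivl would raise #DE; Java defines the result */
        }
        return dividend / divisor;
    }

    /* Java rem-int semantics; matches the "movl $0, rIBASE" path above. */
    int32_t java_rem_int(int32_t dividend, int32_t divisor) {
        if (dividend == INT32_MIN && divisor == -1) {
            return 0;
        }
        return dividend % divisor;
    }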
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: x86/op_and_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- andl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: x86/op_or_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- orl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: x86/op_xor_int.S */
-/* File: x86/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- xorl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: x86/op_shl_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: x86/op_shr_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: x86/op_ushr_int.S */
-/* File: x86/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: x86/op_add_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- addl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- adcl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: x86/op_sub_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- subl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- sbbl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: x86/op_mul_long.S */
-/*
- * Signed 64-bit integer multiply.
- *
- * We could definitely use more free registers for
- * this code. We spill rINSTw (ebx),
- * giving us eax, ebx, ecx and edx as computational
- * temps. On top of that, we'll spill edi (rFP)
- * for use as the vB pointer and esi (rPC) for use
- * as the vC pointer. Yuck.
- *
- */
- /* mul-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- B
- movzbl 3(rPC), %ecx # ecx <- C
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[B]
- leal (rFP,%ecx,4), rFP # rFP <- &v[C]
- movl 4(%esi), %ecx # ecx <- Bmsw
- imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
- movl 4(rFP), %eax # eax <- Cmsw
- imull (%esi), %eax # eax <- (Cmsw*Blsw)
- addl %eax, %ecx # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
- movl (rFP), %eax # eax <- Clsw
- mull (%esi) # eax <- (Clsw*Alsw)
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL1(%esp), rFP # restore FP
- leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- mov LOCAL2(%esp), rIBASE # restore IBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
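The partial-product scheme in op_mul_long above is the standard 32x32 decomposition of a 64-bit product: only the low-half multiply needs the widening `mull`, while the two cross terms can use truncating `imull` because anything they would contribute above bit 63 is discarded anyway. The same arithmetic in C (illustrative only, not ART code); two's-complement means the identical bit pattern is correct for signed mul-long:

    #include <stdint.h>
    #include <stdio.h>

    /* Low 64 bits of b*c, computed from 32-bit halves as the handler does. */
    static uint64_t mul_long_by_halves(uint64_t b, uint64_t c) {
        uint32_t blo = (uint32_t)b, clo = (uint32_t)c;
        uint32_t bhi = (uint32_t)(b >> 32), chi = (uint32_t)(c >> 32);

        uint64_t low = (uint64_t)blo * clo;            /* mull: result in edx:eax */
        uint32_t cross = bhi * clo + chi * blo;        /* two imull + addl        */
        uint32_t hi = (uint32_t)(low >> 32) + cross;   /* leal (%ecx,rIBASE)      */

        return ((uint64_t)hi << 32) | (uint32_t)low;
    }

    int main(void) {
        uint64_t b = 0xFFFFFFFFFFFF8765ULL;            /* a negative long, as raw bits */
        uint64_t c = 987654321ULL;
        printf("%d\n", mul_long_by_halves(b, c) == b * c);   /* prints 1 */
        return 0;
    }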
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: x86/op_div_long.S */
-/* art_quick_* methods have a quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div vAA, vBB, vCC */
- .extern art_quick_ldiv
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %ecx, %edx
- orl %ebx, %ecx
- jz common_errDivideByZero
- movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx, %eax
- GET_VREG %eax, %eax
- call SYMBOL(art_quick_ldiv)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
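op_div_long above does only two things inline: it tests the 64-bit divisor for zero by OR-ing its two 32-bit halves (a long is zero exactly when low|high == 0), and then hands the actual division to the art_quick_ldiv runtime helper using the register-based argument passing noted in the comment. A small C sketch of just the zero test (not ART code):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors movl %ecx, %edx / orl %ebx, %ecx / jz common_errDivideByZero. */
    static int long_is_zero(uint32_t lo, uint32_t hi) {
        return (lo | hi) == 0;
    }

    int main(void) {
        int64_t vcc = 0x100000000LL;       /* low half is zero, the value is not */
        printf("%d %d\n",
               long_is_zero((uint32_t)vcc, (uint32_t)((uint64_t)vcc >> 32)),
               long_is_zero(0, 0));        /* prints 0 1 */
        return 0;
    }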
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: x86/op_rem_long.S */
-/* File: x86/op_div_long.S */
-/* art_quick_* methods have a quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div vAA, vBB, vCC */
- .extern art_quick_lmod
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %ecx, %edx
- orl %ebx, %ecx
- jz common_errDivideByZero
- movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx, %eax
- GET_VREG %eax, %eax
- call SYMBOL(art_quick_lmod)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: x86/op_and_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- andl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- andl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: x86/op_or_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- orl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- orl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: x86/op_xor_long.S */
-/* File: x86/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- xorl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
- xorl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: x86/op_shl_long.S */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shl-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shldl %eax,rIBASE
- sall %cl, %eax
- testb $32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: x86/op_shr_long.S */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: x86/op_ushr_long.S */
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so we have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* ushr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
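The three long-shift handlers above all have the same shape: `shldl`/`shrdl` plus a plain shift compute the result for counts 0..31, and the `testb $32, %cl` branch patches up counts 32..63, which a 32-bit x86 shift would otherwise silently reduce modulo 32 even though Dalvik masks the count only to 6 bits. A C model of the shl-long case (illustrative only, not ART code); shr-long and ushr-long differ only in how the high word is filled:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t shl_long(uint32_t lo, uint32_t hi, uint32_t count) {
        count &= 63;                                  /* Dalvik: low 6 bits only */
        uint32_t c = count & 31;                      /* what %cl delivers       */
        uint32_t new_hi = (hi << c) | (c ? lo >> (32 - c) : 0);   /* shldl       */
        uint32_t new_lo = lo << c;                    /* sall %cl, %eax          */
        if (count & 32) {                             /* testb $32, %cl          */
            new_hi = new_lo;                          /* movl %eax, rIBASE       */
            new_lo = 0;                               /* xorl %eax, %eax         */
        }
        return ((uint64_t)new_hi << 32) | new_lo;
    }

    int main(void) {
        uint64_t v = 0x0000000180000001ULL;
        for (uint32_t n = 0; n < 64; ++n) {
            if (shl_long((uint32_t)v, (uint32_t)(v >> 32), n) != (v << n)) {
                printf("mismatch at %u\n", n);
                return 1;
            }
        }
        printf("ok\n");
        return 0;
    }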
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: x86/op_add_float.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- addss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
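op_add_float above, and the other sseBinop expansions that follow, store %xmm0 twice because the interpreter frame keeps a reference slot alongside each primitive vreg slot: writing a float or double must also null the matching reference slot so the GC never interprets the raw FP bits as a pointer. A simplified model of that double store (the struct layout here is invented for illustration and is not the real ShadowFrame):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NUM_VREGS 16
    struct frame {
        uint32_t vregs[NUM_VREGS];   /* primitive view (VREG_ADDRESS)      */
        uint32_t refs[NUM_VREGS];    /* reference view (VREG_REF_ADDRESS)  */
    };

    static void set_vreg_float(struct frame *f, unsigned vreg, float value) {
        memcpy(&f->vregs[vreg], &value, sizeof value);  /* movss %xmm0, VREG_ADDRESS(...)   */
        f->refs[vreg] = 0;                              /* movss zeroed %xmm0 into ref slot */
    }

    int main(void) {
        struct frame f = { {0}, {0} };
        f.refs[2] = 0xdeadbeefu;                        /* stale reference bits */
        set_vreg_float(&f, 2, 1.5f);
        printf("%08x %08x\n", f.vregs[2], f.refs[2]);   /* 3fc00000 00000000    */
        return 0;
    }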
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: x86/op_sub_float.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- subss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: x86/op_mul_float.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- mulss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: x86/op_div_float.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- divss VREG_ADDRESS(%eax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: x86/op_rem_float.S */
- /* rem_float vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- CC
- movzbl 2(rPC), %eax # eax <- BB
- flds VREG_ADDRESS(%ecx) # vCC to fp stack
- flds VREG_ADDRESS(%eax) # vBB to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINST) # %st to vAA
- CLEAR_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: x86/op_add_double.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- addsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: x86/op_sub_double.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- subsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: x86/op_mul_double.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- mulsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: x86/op_div_double.S */
-/* File: x86/sseBinop.S */
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- divsd VREG_ADDRESS(%eax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: x86/op_rem_double.S */
- /* rem_double vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- CC
- movzbl 2(rPC), %eax # eax <- BB
- fldl VREG_ADDRESS(%ecx) # %st1 <- fp[vCC]
- fldl VREG_ADDRESS(%eax) # %st0 <- fp[vBB]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINST) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
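Both rem-float and rem-double above loop on `fprem` because the x87 instruction may deliver only a partial remainder; the `fstsw`/`sahf`/`jp 1b` sequence retries until the C2 status bit (which sahf maps onto the parity flag) clears. The final value is the truncation-style remainder carrying the dividend's sign, i.e. what C's fmodf/fmod compute, as in this small sketch (not ART code):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        float  f = fmodf(10.5f, 3.0f);   /* rem-float  vAA <- vBB % vCC           -> 1.50  */
        double d = fmod(-7.25, 2.0);     /* rem-double keeps the dividend's sign  -> -1.25 */
        printf("%.2f %.2f\n", f, d);
        return 0;
    }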
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: x86/op_add_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- addl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
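op_add_int_2addr above, and every /2addr handler that follows, starts from the same decode: by the time the handler runs, rINST holds the byte after the opcode, with source register B in the high nibble and destination/first-source register A in the low nibble, which is what the `sarl $4` / `andb $0xf` pair extracts. Illustrative decode in C (hypothetical names, not ART code):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_2addr(uint8_t ba_byte, unsigned *vA, unsigned *vB) {
        *vB = ba_byte >> 4;          /* sarl $4, rINST */
        *vA = ba_byte & 0x0fu;       /* andb $0xf, %cl */
    }

    int main(void) {
        unsigned a, b;
        decode_2addr(0x53, &a, &b);  /* e.g. add-int/2addr v3, v5 (BA byte 0x53) */
        printf("vA=v%u vB=v%u\n", a, b);   /* vA=v3 vB=v5 */
        return 0;
    }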
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: x86/op_sub_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- subl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: x86/op_mul_int_2addr.S */
- /* mul vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- movl rIBASE, rINST
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- movl rINST, rIBASE
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: x86/op_div_int_2addr.S */
-/* File: x86/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- BA
- mov rIBASE, LOCAL0(%esp)
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # ecx <- vB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vA
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_div_int_2addr_continue_div2addr
- cmpl $0x80000000, %eax
- jne .Lop_div_int_2addr_continue_div2addr
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_div_int_2addr_continue_div2addr:
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: x86/op_rem_int_2addr.S */
-/* File: x86/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- BA
- mov rIBASE, LOCAL0(%esp)
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # ecx <- vB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vA
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_rem_int_2addr_continue_div2addr
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_2addr_continue_div2addr
- movl $0, rIBASE
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.Lop_rem_int_2addr_continue_div2addr:
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: x86/op_and_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- andl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: x86/op_or_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- orl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: x86/op_xor_int_2addr.S */
-/* File: x86/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $0xf, %cl # ecx <- A
- xorl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: x86/op_shl_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # ecx <- vB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: x86/op_shr_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # ecx <- vB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: x86/op_ushr_int_2addr.S */
-/* File: x86/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # ecx <- vB
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: x86/op_add_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- addl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- adcl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: x86/op_sub_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- subl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- sbbl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: x86/op_mul_long_2addr.S */
-/*
- * Signed 64-bit integer multiply, 2-addr version
- *
- * We could definitely use more free registers for
- * this code. We must spill %edx (rIBASE) because it
- * is used by imul. We'll also spill rINST (ebx),
- * giving us eax, ebx, ecx and rIBASE as computational
- * temps. On top of that, we'll spill rPC (%esi)
- * for use as the vA pointer and repoint rFP (%edi)
- * at the vB pair. Yuck.
- */
- /* mul-long/2addr vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- CLEAR_WIDE_REF %eax # clear refs in advance
- sarl $4, rINST # rINST <- B
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[A]
- leal (rFP,rINST,4), rFP # rFP <- &v[B]
- movl 4(%esi), %ecx # ecx <- Amsw
- imull (rFP), %ecx # ecx <- (Amsw*Blsw)
- movl 4(rFP), %eax # eax <- Bmsw
- imull (%esi), %eax # eax <- (Bmsw*Alsw)
- addl %eax, %ecx # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
- movl (rFP), %eax # eax <- Blsw
- mull (%esi) # eax <- (Blsw*Alsw)
- leal (%ecx,rIBASE), rIBASE # full result now in %edx:%eax
- movl rIBASE, 4(%esi) # v[A+1] <- rIBASE
- movl %eax, (%esi) # v[A] <- %eax
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL2(%esp), rIBASE # restore IBASE
- mov LOCAL1(%esp), rFP # restore FP
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: x86/op_div_long_2addr.S */
-/* art_quick_* methods have a quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div/2addr vA, vB */
- .extern art_quick_ldiv
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- movzbl rINSTbl, %eax
- shrl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movl %ebx, %ecx
- GET_VREG %edx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %edx, %eax
- orl %ebx, %eax
- jz common_errDivideByZero
- GET_VREG %eax, %ecx
- GET_VREG_HIGH %ecx, %ecx
- call SYMBOL(art_quick_ldiv)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: x86/op_rem_long_2addr.S */
-/* File: x86/op_div_long_2addr.S */
-/* art_quick_* methods have a quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div/2addr vA, vB */
- .extern art_quick_lmod
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- movzbl rINSTbl, %eax
- shrl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movl %ebx, %ecx
- GET_VREG %edx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %edx, %eax
- orl %ebx, %eax
- jz common_errDivideByZero
- GET_VREG %eax, %ecx
- GET_VREG_HIGH %ecx, %ecx
- call SYMBOL(art_quick_lmod)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: x86/op_and_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- andl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- andl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: x86/op_or_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- orl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- orl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: x86/op_xor_long_2addr.S */
-/* File: x86/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
- andb $0xF, rINSTbl # rINST<- A
- xorl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
- xorl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: x86/op_shl_long_2addr.S */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shldl %eax, rIBASE
- sall %cl, %eax
- testb $32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: x86/op_shr_long_2addr.S */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86/op_ushr_long_2addr.S */
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: x86/op_add_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: x86/op_sub_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: x86/op_mul_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: x86/op_div_float_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divss VREG_ADDRESS(rINST), %xmm0
- movss %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: x86/op_rem_float_2addr.S */
- /* rem_float/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: x86/op_add_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: x86/op_sub_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: x86/op_mul_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: x86/op_div_double_2addr.S */
-/* File: x86/sseBinop2Addr.S */
- movzx rINSTbl, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divsd VREG_ADDRESS(rINST), %xmm0
- movsd %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: x86/op_rem_double_2addr.S */
- /* rem_double/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_WIDE_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: x86/op_add_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- addl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: x86/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- subl %eax, %ecx # for example: addl %ecx, %eax
- SET_VREG %ecx, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
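rsub-int above is the one lit16 op whose operands are reversed: the sign-extended literal (the `movswl 2(rPC)` load) is the minuend and vB is subtracted from it, hence `subl %eax, %ecx` followed by storing %ecx rather than %eax. A one-line C sketch of the semantics (not ART code):

    #include <stdint.h>
    #include <stdio.h>

    static int32_t rsub_int(int32_t vB, int16_t cccc) {
        return (int32_t)cccc - vB;        /* literal minus register, not the reverse */
    }

    int main(void) {
        printf("%d\n", rsub_int(3, -1));  /* -1 - 3 = -4 */
        return 0;
    }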
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: x86/op_mul_int_lit16.S */
- /* mul/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movl rIBASE, %ecx
- movswl 2(rPC), rIBASE # rIBASE <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: x86/op_div_int_lit16.S */
-/* File: x86/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_div_int_lit16_continue_div
- cmpl $0x80000000, %eax
- jne .Lop_div_int_lit16_continue_div
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit16_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: x86/op_rem_int_lit16.S */
-/* File: x86/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- jne .Lop_rem_int_lit16_continue_div
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_lit16_continue_div
- movl $0, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit16_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: x86/op_and_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- andl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: x86/op_or_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- orl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: x86/op_xor_int_lit16.S */
-/* File: x86/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- xorl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: x86/op_add_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- addl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86/op_rsub_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- subl %eax, %ecx # ex: addl %ecx,%eax
- SET_VREG %ecx, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
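The lit8 handlers (op_add_int_lit8 and op_rsub_int_lit8 above, and the ones that follow) use the 22b instruction format: the first code unit is AA|op and the second is CC|BB, so BB is an unsigned register index while CC is a signed 8-bit literal, which is why the handlers pair `movzbl 2(rPC)` with a sign-extending `movsbl 3(rPC)`. Illustrative decode (hypothetical names, not ART code):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_lit8(const uint16_t *insns, unsigned *vAA, unsigned *vBB,
                            int32_t *lit) {
        *vAA = insns[0] >> 8;             /* AA from the opcode unit         */
        *vBB = insns[1] & 0xffu;          /* movzbl 2(rPC): unsigned BB      */
        *lit = (int8_t)(insns[1] >> 8);   /* movsbl 3(rPC): sign-extended CC */
    }

    int main(void) {
        /* add-int/lit8 v0, v1, #-2  ->  code units 0x00d8, 0xfe01 */
        const uint16_t insns[2] = { 0x00d8, 0xfe01 };
        unsigned a, b;
        int32_t c;
        decode_lit8(insns, &a, &b, &c);
        printf("vAA=v%u vBB=v%u lit=%d\n", a, b, c);   /* vAA=v0 vBB=v1 lit=-2 */
        return 0;
    }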
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: x86/op_mul_int_lit8.S */
- /* mul/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movl rIBASE, %ecx
- GET_VREG %eax, %eax # eax <- vBB
- movsbl 3(rPC), rIBASE # rIBASE <- ssssssCC
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: x86/op_div_int_lit8.S */
-/* File: x86/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $0x80000000, %eax
- jne .Lop_div_int_lit8_continue_div
- cmpl $-1, %ecx
- jne .Lop_div_int_lit8_continue_div
- movl $0x80000000, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_div_int_lit8_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: x86/op_rem_int_lit8.S */
-/* File: x86/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $0x80000000, %eax
- jne .Lop_rem_int_lit8_continue_div
- cmpl $-1, %ecx
- jne .Lop_rem_int_lit8_continue_div
- movl $0, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.Lop_rem_int_lit8_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG rIBASE, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: x86/op_and_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- andl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: x86/op_or_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- orl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: x86/op_xor_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- xorl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: x86/op_shl_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: x86/op_shr_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86/op_ushr_int_lit8.S */
-/* File: x86/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: x86/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
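op_iget_quick above, and the other *-quick field handlers that follow, are the quickened fast path: the iget has already been rewritten to carry the resolved field's byte offset in the instruction, so the handler only null-checks the object and loads directly from object + offset with no dex-cache lookup. A simplified C model (the object layout and helper name below are invented for illustration, not ART types):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct obj_header { uint32_t klass; uint32_t monitor; };   /* placeholder header */
    struct point { struct obj_header hdr; int32_t x; int32_t y; };

    static int32_t iget_quick_i32(const void *obj, uint32_t byte_offset) {
        if (obj == NULL) {
            return 0;   /* the handler instead branches to common_errNullObject */
        }
        return *(const int32_t *)((const char *)obj + byte_offset);
    }

    int main(void) {
        struct point p = { {0, 0}, 41, 42 };
        printf("%d\n", iget_quick_i32(&p, offsetof(struct point, y)));   /* 42 */
        return 0;
    }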
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: x86/op_iget_wide_quick.S */
- /* iget-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movq (%ecx,%eax,1), %xmm0
- andb $0xf, rINSTbl # rINST <- A
- SET_WIDE_FP_VREG %xmm0, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: x86/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG1(%esp)
- EXPORT_PC
- call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
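op_iget_object_quick above is the first handler in this stretch that calls out to the runtime and then tests the thread's pending-exception slot before using the result; the same call/check/bail shape recurs in the invoke handlers below. A C sketch of that pattern with stand-in names (the real helper is artIGetObjectFromMterp; its exact signature is not restated here):

    #include <stdint.h>
    #include <stdio.h>

    struct thread { void *exception; };   /* only the slot the handler tests */

    static void *fake_helper(void *obj, uint32_t offset) {
        return (char *)obj + offset;      /* stands in for the runtime call  */
    }

    static void *iget_object_quick(struct thread *self, void *obj, uint32_t offset,
                                   void *(*helper)(void *, uint32_t)) {
        void *value = helper(obj, offset);   /* call SYMBOL(...)                 */
        if (self->exception != NULL) {       /* cmpl $0, THREAD_EXCEPTION_OFFSET */
            return NULL;                     /* jnz MterpException               */
        }
        return value;                        /* SET_VREG_OBJECT %eax, rINST      */
    }

    int main(void) {
        struct thread self = { 0 };
        char object[16] = {0};
        printf("%d\n", iget_object_quick(&self, object, 8, fake_helper) != NULL);  /* 1 */
        return 0;
    }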
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: x86/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movl rINST, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: x86/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwl 2(rPC), %eax # eax<- field byte offset
- leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
- andb $0xf, rINSTbl # rINST<- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
- movq %xmm0, (%ecx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: x86/op_iput_object_quick.S */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST 232
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86/op_invoke_virtual_quick.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 233
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86/op_invoke_virtual_range_quick.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 234
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeVirtualQuickRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: x86/op_iput_boolean_quick.S */
-/* File: x86/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movb rINSTbl, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: x86/op_iput_byte_quick.S */
-/* File: x86/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movb rINSTbl, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: x86/op_iput_char_quick.S */
-/* File: x86/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movw rINSTw, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: x86/op_iput_short_quick.S */
-/* File: x86/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- movw rINSTw, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: x86/op_iget_boolean_quick.S */
-/* File: x86/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movsbl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: x86/op_iget_byte_quick.S */
-/* File: x86/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movsbl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: x86/op_iget_char_quick.S */
-/* File: x86/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: x86/op_iget_short_quick.S */
-/* File: x86/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movswl (%ecx,%eax,1), %eax
- andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: x86/op_unused_f3.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: x86/op_unused_f4.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: x86/op_unused_f5.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: x86/op_unused_f6.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: x86/op_unused_f7.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: x86/op_unused_f8.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: x86/op_unused_f9.S */
-/* File: x86/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: x86/op_invoke_polymorphic.S */
-/* File: x86/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 250
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokePolymorphic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86/op_invoke_polymorphic_range.S */
-/* File: x86/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 251
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokePolymorphicRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: x86/op_invoke_custom.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 252
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeCustom)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: x86/op_invoke_custom_range.S */
-/* File: x86/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST 253
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL(MterpInvokeCustomRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: x86/op_const_method_handle.S */
-/* File: x86/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: x86/op_const_method_type.S */
-/* File: x86/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
- .balign 128
-/* File: x86/instruction_end.S */
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: x86/instruction_start_sister.S */
-
- OBJECT_TYPE(artMterpAsmSisterStart)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
- .global SYMBOL(artMterpAsmSisterStart)
- .text
- .balign 4
-SYMBOL(artMterpAsmSisterStart):
-
-/* File: x86/instruction_end_sister.S */
-
- OBJECT_TYPE(artMterpAsmSisterEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
- .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
-
-/* File: x86/instruction_start_alt.S */
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(0*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(1*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(2*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(3*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(4*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(5*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(6*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(7*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(8*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(9*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(10*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(11*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(12*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(13*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(14*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(15*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(16*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(17*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(18*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(19*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(20*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(21*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(22*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(23*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(24*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(25*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(26*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(27*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(28*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(29*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(30*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(31*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(32*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(33*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(34*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(35*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(36*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(37*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(38*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(39*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(40*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(41*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(42*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(43*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(44*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(45*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(46*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(47*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(48*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(49*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(50*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(51*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(52*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(53*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(54*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(55*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(56*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(57*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(58*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(59*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(60*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(61*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(62*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(63*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(64*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(65*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(66*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(67*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(68*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(69*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(70*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(71*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(72*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(73*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(74*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(75*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(76*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(77*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(78*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(79*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(80*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(81*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(82*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(83*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(84*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(85*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(86*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(87*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(88*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(89*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(90*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(91*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(92*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(93*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(94*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(95*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(96*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(97*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(98*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(99*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(100*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(101*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(102*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(103*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(104*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(105*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(106*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(107*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(108*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(109*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(110*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(111*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(112*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(113*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(114*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(115*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(116*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(117*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(118*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(119*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(120*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(121*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(122*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(123*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(124*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(125*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(126*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(127*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(128*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(129*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(130*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(131*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(132*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(133*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(134*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(135*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(136*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(137*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(138*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(139*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(140*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(141*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(142*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(143*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(144*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(145*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(146*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(147*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(148*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(149*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(150*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(151*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(152*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(153*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(154*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(155*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(156*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(157*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(158*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(159*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(160*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(161*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(162*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(163*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(164*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(165*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(166*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(167*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(168*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(169*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(170*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(171*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(172*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(173*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(174*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(175*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(176*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(177*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(178*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(179*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(180*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(181*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(182*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(183*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(184*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(185*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(186*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(187*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(188*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(189*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(190*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(191*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(192*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(193*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(194*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(195*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(196*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(197*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(198*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(199*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(200*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(201*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(202*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(203*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(204*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(205*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(206*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(207*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(208*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(209*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(210*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(211*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(212*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(213*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(214*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(215*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(216*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(217*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(218*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(219*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(220*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(221*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(222*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(223*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(224*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(225*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(226*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(227*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(228*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(229*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(230*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(231*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(232*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(233*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(234*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(235*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(236*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(237*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(238*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(239*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(240*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(241*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(242*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(243*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(244*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(245*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(246*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(247*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(248*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(249*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(250*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(251*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(252*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(253*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(254*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: x86/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(255*128)
-
- .balign 128
-/* File: x86/instruction_end_alt.S */
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
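Every ALT stub deleted above is the same x86/alt_stub.S template stamped out once per opcode: it stores self, the shadow-frame pointer, and the dex PC into the outgoing argument slots, calls MterpCheckBefore, reloads rIBASE, and jumps to that opcode's 128-byte slot in the regular handler table. A minimal C++ sketch of that flow, assuming illustrative names (AltStubSketch, CheckBeforeFn, main_handler_base, opcode) for what the assembly macros and the generated layout provide:

#include <cstddef>
#include <cstdint>

struct Thread;       // runtime thread (rSELF)
struct ShadowFrame;  // interpreter frame (OFF_FP_SHADOWFRAME(rFP))

// Stand-in for MterpCheckBefore(self, shadow_frame, dex_pc_ptr).
using CheckBeforeFn = void (*)(Thread*, ShadowFrame*, std::uint16_t*);
using Handler = void (*)();

constexpr std::size_t kHandlerSize = 128;  // each handler slot is .balign 128

void AltStubSketch(Thread* self, ShadowFrame* frame, std::uint16_t* dex_pc_ptr,
                   CheckBeforeFn check_before,
                   std::uintptr_t main_handler_base,  // address of .L_op_nop
                   unsigned opcode) {
  // movl/leal into OUT_ARG0..OUT_ARG2, then call MterpCheckBefore.
  check_before(self, frame, dex_pc_ptr);
  // REFRESH_IBASE, then "jmp .L_op_nop+(opcode*128)": continue in the regular
  // handler table at this opcode's slot.
  Handler real_handler =
      reinterpret_cast<Handler>(main_handler_base + opcode * kHandlerSize);
  real_handler();
}

The reload is the point: rIBASE is caller-save on x86, so the stub has to return from MterpCheckBefore and refresh it before dispatching, which is why it cannot be a tail call the way the Arm stub is.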
-/* File: x86/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl THREAD_FLAGS_OFFSET(%eax), %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movl rSELF, %eax
- testl $-1, THREAD_EXCEPTION_OFFSET(%eax)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
- movl OFF_FP_DEX_PC(rFP), %ecx
- lea (%eax, %ecx, 2), rPC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
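The MterpException block above is the densest part of this footer: it asks the runtime whether the exception is caught in this frame, rebuilds rPC from the dex pc that the handler search left in the shadow frame (dex instructions are 16-bit code units, hence the scale-by-2 lea), and only then resumes dispatch. Purely as a reading aid, here is a minimal C sketch of that flow; the function names and signatures below are illustrative stand-ins, not the real Mterp helpers.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the helpers and shadow-frame fields used above. */
bool HandleException(void);             /* MterpHandleException: false => unwind to caller */
bool ShouldSwitchInterpreters(void);    /* MterpShouldSwitchInterpreters */

typedef enum { kResumeAtCatch, kReturnToCaller, kFallback } ExceptionOutcome;

/*
 * Mirrors the MterpException block: on a pending exception, either resume at the
 * catch block (recomputing rPC from the dex pc left in the shadow frame) or leave
 * the mterp loop.
 */
static ExceptionOutcome OnPendingException(const uint16_t *insns, uint32_t catch_dex_pc,
                                           const uint16_t **resume_pc) {
    if (!HandleException()) {
        return kReturnToCaller;                /* no catch in this frame */
    }
    *resume_pc = insns + catch_dex_pc;         /* lea (%eax, %ecx, 2), rPC */
    return ShouldSwitchInterpreters() ? kFallback : kResumeAtCatch;
}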
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpw $JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decw rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movl rSELF, %eax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- REFRESH_IBASE
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp) # rSELF in eax
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpw $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpw $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl $2, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
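Since the branch-handling code above interleaves several concerns, a compact C sketch of the decision order (the same order the MterpCommonTakenBranch comment lists) may help. The helper names and the int16_t countdown below are hypothetical stand-ins for the Mterp runtime calls and rPROFILE; .L_check_not_taken_osr is the same OSR check with the constant not-taken offset of 2.

#include <stdbool.h>
#include <stdint.h>

#define JIT_CHECK_OSR (-1)   /* sentinel countdown value meaning "OSR-check mode" */

/* Hypothetical stand-ins for the Mterp helpers called from the assembly. */
bool MaybeDoOnStackReplacement(int32_t dex_pc_offset);   /* MterpMaybeDoOnStackReplacement */
void AddHotnessBatch(int16_t *countdown);                /* MterpAddHotnessBatch (may reset it) */
bool SuspendRequestPending(void);                        /* THREAD_SUSPEND_OR_CHECKPOINT_REQUEST */
bool SuspendCheck(void);                                 /* MterpSuspendCheck: true => fall back */

/* Returns true if we should leave the fast interpreter (enter OSR code or fall back). */
static bool OnTakenBranch(int32_t offset, int16_t *countdown) {
    if (offset > 0) {
        /* Forward branches are never counted; only an OSR check may apply. */
        return *countdown == JIT_CHECK_OSR && MaybeDoOnStackReplacement(offset);
    }
    /* Taken backward branch. */
    if (*countdown == JIT_CHECK_OSR) {
        return MaybeDoOnStackReplacement(offset);
    }
    if (--(*countdown) == 0) {
        AddHotnessBatch(countdown);       /* report cached counts, then re-check OSR mode */
        if (*countdown == JIT_CHECK_OSR) {
            return MaybeDoOnStackReplacement(offset);
        }
    }
    if (SuspendRequestPending()) {
        return SuspendCheck();
    }
    return false;                         /* resume interpreting at the branch target */
}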
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpLogOSR)
-#endif
- movl $1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xor %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $1, %eax
- jmp MterpDone
-MterpReturn:
- movl OFF_FP_RESULT_REGISTER(rFP), %edx
- movl %eax, (%edx)
- movl %ecx, 4(%edx)
- mov $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmpw $0, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addl $FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %ebx
- POP %esi
- POP %edi
- POP %ebp
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-
diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S
deleted file mode 100644
index 89d5637..0000000
--- a/runtime/interpreter/mterp/out/mterp_x86_64.S
+++ /dev/null
@@ -1,12200 +0,0 @@
-/*
- * This file was generated automatically by gen-mterp.py for 'x86_64'.
- *
- * --> DO NOT EDIT <--
- */
-
-/* File: x86_64/header.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame).
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation).
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
- rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
- rbx, rbp, r12-r15
-Return regs:
- 32-bit in eax
- 64-bit in rax
- fp on xmm0
-
-First 8 fp parameters come in xmm0-xmm7.
-First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters are passed on the stack, pushed right-to-left. On entry to the
-target, the first stack parameter is at 8(%rsp).
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be rsp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPROFILE rbp countdown register for jit profiling
- rPC r12 interpreted program counter, used for fetching instructions
- rFP r13 interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE r14 base of instruction handler table
- rREFS r15 base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle Mac compiler specifics
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushq \_reg
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popq \_reg
- .cfi_adjust_cfa_offset -8
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember to allow 8 bytes for the return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE 8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 %rcx
-#define IN_ARG2 %rdx
-#define IN_ARG1 %rsi
-#define IN_ARG0 %rdi
-/* Spill offsets relative to %rsp */
-#define SELF_SPILL (FRAME_SIZE - 8)
-/* Out Args */
-#define OUT_ARG3 %rcx
-#define OUT_ARG2 %rdx
-#define OUT_ARG1 %rsi
-#define OUT_ARG0 %rdi
-#define OUT_32_ARG3 %ecx
-#define OUT_32_ARG2 %edx
-#define OUT_32_ARG1 %esi
-#define OUT_32_ARG0 %edi
-#define OUT_FP_ARG1 %xmm1
-#define OUT_FP_ARG0 %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF SELF_SPILL(%rsp)
-#define rPC %r12
-#define CFI_DEX 12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP 5 // DWARF register number of the first argument register (rdi).
-#define rFP %r13
-#define rINST %ebx
-#define rINSTq %rbx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %r14
-#define rREFS %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For efficiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBASE uses a caller-save register, so we must restore it after each call.
- * It is also clobbered as a result of some 64-bit operations (like imul), so
- * it should be restored in such cases as well.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
- movq THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
- movq rSELF, rIBASE
- REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * On entry to the handler, rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb $\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwq (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(7), %eax
- addq rIBASE, %rax
- jmp *%rax
-.endm
-
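FETCH_INST and GOTO_NEXT together are the whole dispatch loop: because every handler below is .balign'ed to a 128-byte slot, the next handler's address is plain arithmetic on the opcode byte rather than a table load. A hedged C equivalent, with invented names standing in for rPC, rIBASE and rINST:

#include <stdint.h>

enum { kHandlerStride = 128 };   /* every opcode handler occupies a 128-byte slot */

typedef void (*Handler)(void);

/*
 * Equivalent of FETCH_INST + GOTO_NEXT: load the 16-bit code unit, keep its high
 * byte for the handler (register fields), and compute base + opcode * 128.
 */
static inline Handler NextHandler(const uint16_t *pc, uintptr_t handler_base,
                                  uint8_t *inst_high) {
    uint16_t inst = *pc;                         /* FETCH_INST: movzwq (rPC), rINSTq */
    *inst_high = (uint8_t)(inst >> 8);           /* GOTO_NEXT leaves this in rINST */
    uint8_t opcode = (uint8_t)(inst & 0xff);     /* low byte selects the handler */
    return (Handler)(handler_base + (uintptr_t)opcode * kHandlerStride);
}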
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leaq 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- xorq \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
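The GET/SET macros above implement the double-store scheme mentioned in the header comment: every Dalvik register has a primitive slot indexed off rFP and a parallel reference slot indexed off rREFS, and non-reference writes must clear the reference slot so the GC never sees a stale pointer. A rough C equivalent, with invented names and assuming 32-bit reference slots, just to make the pairing explicit:

#include <stdint.h>
#include <string.h>

/* Hypothetical view of the two parallel arrays the macros index with vreg * 4. */
typedef struct {
    uint32_t *vregs;   /* primitive values; rFP points at element 0   */
    uint32_t *refs;    /* reference mirror; rREFS points at element 0 */
} VRegFile;

static inline void SetVReg(VRegFile *f, uint32_t vreg, uint32_t value) {
    f->vregs[vreg] = value;
    f->refs[vreg] = 0;              /* SET_VREG: non-reference write clears the ref slot */
}

static inline void SetVRegObject(VRegFile *f, uint32_t vreg, uint32_t ref) {
    f->vregs[vreg] = ref;
    f->refs[vreg] = ref;            /* SET_VREG_OBJECT: mirror the reference for the GC */
}

static inline void SetWideVReg(VRegFile *f, uint32_t vreg, uint64_t value) {
    memcpy(&f->vregs[vreg], &value, sizeof value);   /* occupies two 32-bit slots */
    f->refs[vreg] = 0;                               /* SET_WIDE_VREG zeroes both ref slots */
    f->refs[vreg + 1] = 0;
}

This is also why SET_VREG and SET_VREG_OBJECT must never be mixed up: a primitive written through SET_VREG_OBJECT would be reported to the GC as a reference.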
-/* File: x86_64/entry.S */
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa rsp, 8
-
- /* Spill callee save regs */
- PUSH %rbx
- PUSH %rbp
- PUSH %r12
- PUSH %r13
- PUSH %r14
- PUSH %r15
-
- /* Allocate frame */
- subq $FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Remember the return register */
- movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
- /* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
- leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
- leaq (rFP, %rax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq (IN_ARG1, %rax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- movq IN_ARG0, rSELF
- REFRESH_IBASE_REG IN_ARG0
-
- /* Set up for backwards branches & osr profiling */
- movq IN_ARG0, OUT_ARG2 /* Set up OUT_ARG2 before clobbering IN_ARG0 */
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpSetUpHotnessCountdown)
- movswl %ax, rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/* File: x86_64/instruction_start.S */
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
-
-/* ------------------------------ */
- .balign 128
-.L_op_nop: /* 0x00 */
-/* File: x86_64/op_nop.S */
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move: /* 0x01 */
-/* File: x86_64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
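Many of the two-register handlers that follow open with the same three instructions to split the packed BA byte (left in rINST by GOTO_NEXT) into the vA and vB indexes. In C terms, and purely as an illustration:

#include <stdint.h>

/* Decode the 4-bit register indexes packed into the instruction's high byte. */
static inline void DecodeAB(uint8_t inst_high, uint8_t *vA, uint8_t *vB) {
    *vA = inst_high & 0x0f;   /* low nibble:  vA (andb $0xf, %al)  */
    *vB = inst_high >> 4;     /* high nibble: vB (shrl $4, rINST)  */
}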
-/* ------------------------------ */
- .balign 128
-.L_op_move_from16: /* 0x02 */
-/* File: x86_64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if 0
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_16: /* 0x03 */
-/* File: x86_64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide: /* 0x04 */
-/* File: x86_64/op_move_wide.S */
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movl rINST, %ecx # ecx <- BA
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_from16: /* 0x05 */
-/* File: x86_64/op_move_wide_from16.S */
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_wide_16: /* 0x06 */
-/* File: x86_64/op_move_wide_16.S */
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwq 4(rPC), %rcx # ecx<- BBBB
- movzwq 2(rPC), %rax # eax<- AAAA
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object: /* 0x07 */
-/* File: x86_64/op_move_object.S */
-/* File: x86_64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 1
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_from16: /* 0x08 */
-/* File: x86_64/op_move_object_from16.S */
-/* File: x86_64/op_move_from16.S */
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if 1
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_object_16: /* 0x09 */
-/* File: x86_64/op_move_object_16.S */
-/* File: x86_64/op_move_16.S */
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if 1
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result: /* 0x0a */
-/* File: x86_64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_wide: /* 0x0b */
-/* File: x86_64/op_move_result_wide.S */
- /* move-result-wide vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movq (%rax), %rdx # Get wide
- SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_result_object: /* 0x0c */
-/* File: x86_64/op_move_result_object.S */
-/* File: x86_64/op_move_result.S */
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if 1
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_move_exception: /* 0x0d */
-/* File: x86_64/op_move_exception.S */
- /* move-exception vAA */
- movq rSELF, %rcx
- movl THREAD_EXCEPTION_OFFSET(%rcx), %eax
- SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
- movl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void: /* 0x0e */
-/* File: x86_64/op_return_void.S */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return: /* 0x0f */
-/* File: x86_64/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_wide: /* 0x10 */
-/* File: x86_64/op_return_wide.S */
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_object: /* 0x11 */
-/* File: x86_64/op_return_object.S */
-/* File: x86_64/op_return.S */
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_4: /* 0x12 */
-/* File: x86_64/op_const_4.S */
- /* const/4 vA, #+B */
- movsbl rINSTbl, %eax # eax <-ssssssBx
- movl $0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $4, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_16: /* 0x13 */
-/* File: x86_64/op_const_16.S */
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const: /* 0x14 */
-/* File: x86_64/op_const.S */
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINSTq # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_high16: /* 0x15 */
-/* File: x86_64/op_const_high16.S */
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINSTq # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_16: /* 0x16 */
-/* File: x86_64/op_const_wide_16.S */
- /* const-wide/16 vAA, #+BBBB */
- movswq 2(rPC), %rax # rax <- ssssBBBB
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_32: /* 0x17 */
-/* File: x86_64/op_const_wide_32.S */
- /* const-wide/32 vAA, #+BBBBbbbb */
- movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide: /* 0x18 */
-/* File: x86_64/op_const_wide.S */
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_wide_high16: /* 0x19 */
-/* File: x86_64/op_const_wide_high16.S */
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwq 2(rPC), %rax # rax <- 0000BBBB
- salq $48, %rax # rax <- BBBB000000000000
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string: /* 0x1a */
-/* File: x86_64/op_const_string.S */
-/* File: x86_64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstString
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_string_jumbo: /* 0x1b */
-/* File: x86_64/op_const_string_jumbo.S */
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBB
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_class: /* 0x1c */
-/* File: x86_64/op_const_class.S */
-/* File: x86_64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstClass
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_enter: /* 0x1d */
-/* File: x86_64/op_monitor_enter.S */
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artLockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_monitor_exit: /* 0x1e */
-/* File: x86_64/op_monitor_exit.S */
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_check_cast: /* 0x1f */
-/* File: x86_64/op_check_cast.S */
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_instance_of: /* 0x20 */
-/* File: x86_64/op_instance_of.S */
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
- movl rINST, %eax # eax <- BA
- sarl $4, %eax # eax <- B
- leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movsbl %al, %eax
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- andb $0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_array_length: /* 0x21 */
-/* File: x86_64/op_array_length.S */
-/*
- * Return the length of an array.
- */
- movl rINST, %eax # eax <- BA
- sarl $4, rINST # rINST <- B
- GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
- SET_VREG rINST, %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_instance: /* 0x22 */
-/* File: x86_64/op_new_instance.S */
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rSELF, OUT_ARG1
- REFRESH_INST 34
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpNewInstance)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_new_array: /* 0x23 */
-/* File: x86_64/op_new_array.S */
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 35
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array: /* 0x24 */
-/* File: x86_64/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArray
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpFilledNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_filled_new_array_range: /* 0x25 */
-/* File: x86_64/op_filled_new_array_range.S */
-/* File: x86_64/op_filled_new_array.S */
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern MterpFilledNewArrayRange
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpFilledNewArrayRange)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_fill_array_data: /* 0x26 */
-/* File: x86_64/op_fill_array_data.S */
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movslq 2(rPC), %rcx # rcx <- ssssssssBBBBbbbb
- leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
-
-/* ------------------------------ */
- .balign 128
-.L_op_throw: /* 0x27 */
-/* File: x86_64/op_throw.S */
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINSTq # eax<- vAA (exception object)
- testb %al, %al
- jz common_errNullObject
- movq rSELF, %rcx
- movq %rax, THREAD_EXCEPTION_OFFSET(%rcx)
- jmp MterpException
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto: /* 0x28 */
-/* File: x86_64/op_goto.S */
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_16: /* 0x29 */
-/* File: x86_64/op_goto_16.S */
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_goto_32: /* 0x2a */
-/* File: x86_64/op_goto_32.S */
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Because the common taken-branch code needs the sign flag set for the offset,
- * we test the value after loading it instead of jumping straight to the handler.
- */
- /* goto/32 +AAAAAAAA */
- movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_packed_switch: /* 0x2b */
-/* File: x86_64/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL(MterpDoPackedSwitch)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
-
-/* ------------------------------ */
- .balign 128
-.L_op_sparse_switch: /* 0x2c */
-/* File: x86_64/op_sparse_switch.S */
-/* File: x86_64/op_packed_switch.S */
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL(MterpDoSparseSwitch)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_float: /* 0x2d */
-/* File: x86_64/op_cmpl_float.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movss VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpl_float_nan_is_neg
- je .Lop_cmpl_float_finish
- jb .Lop_cmpl_float_less
-.Lop_cmpl_float_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpl_float_finish
-.Lop_cmpl_float_nan_is_neg:
-.Lop_cmpl_float_less:
- movl $-1, %eax
-.Lop_cmpl_float_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
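The cmpl/cmpg handlers generated from fpcmp.S differ only in which result a NaN operand produces, which is what the jp branch after ucomiss selects. A small, hedged C illustration of the two conventions:

#include <math.h>

/* cmpl-float / cmpl-double: a NaN operand yields -1 ("less" bias). */
static int CmplFloat(float x, float y) {
    if (isnan(x) || isnan(y)) return -1;
    return (x > y) - (x < y);        /* 1, 0, or -1 */
}

/* cmpg-float / cmpg-double: a NaN operand yields +1 ("greater" bias). */
static int CmpgFloat(float x, float y) {
    if (isnan(x) || isnan(y)) return 1;
    return (x > y) - (x < y);
}

Dex provides both forms so that a compiled comparison can pick whichever NaN bias makes the following branch fall the intended way.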
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_float: /* 0x2e */
-/* File: x86_64/op_cmpg_float.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movss VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomiss VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpg_float_nan_is_pos
- je .Lop_cmpg_float_finish
- jb .Lop_cmpg_float_less
-.Lop_cmpg_float_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpg_float_finish
-.Lop_cmpg_float_nan_is_neg:
-.Lop_cmpg_float_less:
- movl $-1, %eax
-.Lop_cmpg_float_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpl_double: /* 0x2f */
-/* File: x86_64/op_cmpl_double.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movsd VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpl_double_nan_is_neg
- je .Lop_cmpl_double_finish
- jb .Lop_cmpl_double_less
-.Lop_cmpl_double_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpl_double_finish
-.Lop_cmpl_double_nan_is_neg:
-.Lop_cmpl_double_less:
- movl $-1, %eax
-.Lop_cmpl_double_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmpg_double: /* 0x30 */
-/* File: x86_64/op_cmpg_double.S */
-/* File: x86_64/fpcmp.S */
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movsd VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomisd VREG_ADDRESS(%rcx), %xmm0
- jp .Lop_cmpg_double_nan_is_pos
- je .Lop_cmpg_double_finish
- jb .Lop_cmpg_double_less
-.Lop_cmpg_double_nan_is_pos:
- addb $1, %al
- jmp .Lop_cmpg_double_finish
-.Lop_cmpg_double_nan_is_neg:
-.Lop_cmpg_double_less:
- movl $-1, %eax
-.Lop_cmpg_double_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_cmp_long: /* 0x31 */
-/* File: x86_64/op_cmp_long.S */
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbq 2(rPC), %rdx # edx <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
- xorl %eax, %eax
- xorl %edi, %edi
- addb $1, %al
- movl $-1, %esi
- cmpq VREG_ADDRESS(%rcx), %rdx
- cmovl %esi, %edi
- cmovg %eax, %edi
- SET_VREG %edi, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eq: /* 0x32 */
-/* File: x86_64/op_if_eq.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jne 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ne: /* 0x33 */
-/* File: x86_64/op_if_ne.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- je 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lt: /* 0x34 */
-/* File: x86_64/op_if_lt.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jge 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ge: /* 0x35 */
-/* File: x86_64/op_if_ge.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jl 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gt: /* 0x36 */
-/* File: x86_64/op_if_gt.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jle 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_le: /* 0x37 */
-/* File: x86_64/op_if_le.S */
-/* File: x86_64/bincmp.S */
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- jg 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_eqz: /* 0x38 */
-/* File: x86_64/op_if_eqz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jne 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_nez: /* 0x39 */
-/* File: x86_64/op_if_nez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- je 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_ltz: /* 0x3a */
-/* File: x86_64/op_if_ltz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jge 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gez: /* 0x3b */
-/* File: x86_64/op_if_gez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jl 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_gtz: /* 0x3c */
-/* File: x86_64/op_if_gtz.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jle 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_if_lez: /* 0x3d */
-/* File: x86_64/op_if_lez.S */
-/* File: x86_64/zcmp.S */
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- jg 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3e: /* 0x3e */
-/* File: x86_64/op_unused_3e.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_3f: /* 0x3f */
-/* File: x86_64/op_unused_3f.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_40: /* 0x40 */
-/* File: x86_64/op_unused_40.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_41: /* 0x41 */
-/* File: x86_64/op_unused_41.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_42: /* 0x42 */
-/* File: x86_64/op_unused_42.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_43: /* 0x43 */
-/* File: x86_64/op_unused_43.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget: /* 0x44 */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movl MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_wide: /* 0x45 */
-/* File: x86_64/op_aget_wide.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 1
- movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movq MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_object: /* 0x46 */
-/* File: x86_64/op_aget_object.S */
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
- GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
- EXPORT_PC
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_boolean: /* 0x47 */
-/* File: x86_64/op_aget_boolean.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_byte: /* 0x48 */
-/* File: x86_64/op_aget_byte.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_char: /* 0x49 */
-/* File: x86_64/op_aget_char.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aget_short: /* 0x4a */
-/* File: x86_64/op_aget_short.S */
-/* File: x86_64/op_aget.S */
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- movq MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput: /* 0x4b */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movl rINST, MIRROR_INT_ARRAY_DATA_OFFSET(%rax,%rcx,4)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_wide: /* 0x4c */
-/* File: x86_64/op_aput_wide.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 1
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movq rINSTq, MIRROR_WIDE_ARRAY_DATA_OFFSET(%rax,%rcx,8)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_object: /* 0x4d */
-/* File: x86_64/op_aput_object.S */
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 77
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpAputObject) # (array, index)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_boolean: /* 0x4e */
-/* File: x86_64/op_aput_boolean.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movb rINSTbl, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%rax,%rcx,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_byte: /* 0x4f */
-/* File: x86_64/op_aput_byte.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movb rINSTbl, MIRROR_BYTE_ARRAY_DATA_OFFSET(%rax,%rcx,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_char: /* 0x50 */
-/* File: x86_64/op_aput_char.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movw rINSTw, MIRROR_CHAR_ARRAY_DATA_OFFSET(%rax,%rcx,2)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_aput_short: /* 0x51 */
-/* File: x86_64/op_aput_short.S */
-/* File: x86_64/op_aput.S */
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if 0
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- movw rINSTw, MIRROR_SHORT_ARRAY_DATA_OFFSET(%rax,%rcx,2)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget: /* 0x52 */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetU32)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide: /* 0x53 */
-/* File: x86_64/op_iget_wide.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetU64)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 1
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object: /* 0x54 */
-/* File: x86_64/op_iget_object.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetObj)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean: /* 0x55 */
-/* File: x86_64/op_iget_boolean.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetU8)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte: /* 0x56 */
-/* File: x86_64/op_iget_byte.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetI8)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char: /* 0x57 */
-/* File: x86_64/op_iget_char.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetU16)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short: /* 0x58 */
-/* File: x86_64/op_iget_short.S */
-/* File: x86_64/op_iget.S */
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIGetI16)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput: /* 0x59 */
-/* File: x86_64/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU32
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutU32)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide: /* 0x5a */
-/* File: x86_64/op_iput_wide.S */
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST <- A
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object: /* 0x5b */
-/* File: x86_64/op_iput_object.S */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 91
- movl rINST, OUT_32_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean: /* 0x5c */
-/* File: x86_64/op_iput_boolean.S */
-/* File: x86_64/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU8
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutU8)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte: /* 0x5d */
-/* File: x86_64/op_iput_byte.S */
-/* File: x86_64/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI8
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutI8)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char: /* 0x5e */
-/* File: x86_64/op_iput_char.S */
-/* File: x86_64/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutU16
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutU16)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short: /* 0x5f */
-/* File: x86_64/op_iput_short.S */
-/* File: x86_64/op_iput.S */
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern MterpIPutI16
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutI16)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget: /* 0x60 */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU32
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetU32)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_wide: /* 0x61 */
-/* File: x86_64/op_sget_wide.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU64
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetU64)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 1
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_object: /* 0x62 */
-/* File: x86_64/op_sget_object.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetObj
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetObj)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 1
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_boolean: /* 0x63 */
-/* File: x86_64/op_sget_boolean.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU8
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetU8)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_byte: /* 0x64 */
-/* File: x86_64/op_sget_byte.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI8
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetI8)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_char: /* 0x65 */
-/* File: x86_64/op_sget_char.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetU16
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetU16)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sget_short: /* 0x66 */
-/* File: x86_64/op_sget_short.S */
-/* File: x86_64/op_sget.S */
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern MterpSGetI16
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL(MterpSGetI16)
- movq rSELF, %rcx
- cmpl $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if 0
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if 0
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput: /* 0x67 */
-/* File: x86_64/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU32
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutU32)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_wide: /* 0x68 */
-/* File: x86_64/op_sput_wide.S */
-/*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_object: /* 0x69 */
-/* File: x86_64/op_sput_object.S */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 105
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_boolean: /* 0x6a */
-/* File: x86_64/op_sput_boolean.S */
-/* File: x86_64/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU8
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutU8)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_byte: /* 0x6b */
-/* File: x86_64/op_sput_byte.S */
-/* File: x86_64/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutI8
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutI8)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_char: /* 0x6c */
-/* File: x86_64/op_sput_char.S */
-/* File: x86_64/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutU16
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutU16)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sput_short: /* 0x6d */
-/* File: x86_64/op_sput_short.S */
-/* File: x86_64/op_sput.S */
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern MterpSPutI16
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutI16)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual: /* 0x6e */
-/* File: x86_64/op_invoke_virtual.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtual
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 110
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtual)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super: /* 0x6f */
-/* File: x86_64/op_invoke_super.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuper
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 111
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeSuper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct: /* 0x70 */
-/* File: x86_64/op_invoke_direct.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirect
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 112
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeDirect)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static: /* 0x71 */
-/* File: x86_64/op_invoke_static.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStatic
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 113
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeStatic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface: /* 0x72 */
-/* File: x86_64/op_invoke_interface.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterface
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 114
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeInterface)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-/* ------------------------------ */
- .balign 128
-.L_op_return_void_no_barrier: /* 0x73 */
-/* File: x86_64/op_return_void_no_barrier.S */
- movq rSELF, OUT_ARG0
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range: /* 0x74 */
-/* File: x86_64/op_invoke_virtual_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 116
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_super_range: /* 0x75 */
-/* File: x86_64/op_invoke_super_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeSuperRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 117
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeSuperRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_direct_range: /* 0x76 */
-/* File: x86_64/op_invoke_direct_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeDirectRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 118
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeDirectRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_static_range: /* 0x77 */
-/* File: x86_64/op_invoke_static_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeStaticRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 119
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeStaticRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_interface_range: /* 0x78 */
-/* File: x86_64/op_invoke_interface_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeInterfaceRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 120
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeInterfaceRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_79: /* 0x79 */
-/* File: x86_64/op_unused_79.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_7a: /* 0x7a */
-/* File: x86_64/op_unused_7a.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_int: /* 0x7b */
-/* File: x86_64/op_neg_int.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- negl %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_int: /* 0x7c */
-/* File: x86_64/op_not_int.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- notl %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_long: /* 0x7d */
-/* File: x86_64/op_neg_long.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- negq %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_not_long: /* 0x7e */
-/* File: x86_64/op_not_long.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- notq %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_float: /* 0x7f */
-/* File: x86_64/op_neg_float.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
- xorl $0x80000000, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_neg_double: /* 0x80 */
-/* File: x86_64/op_neg_double.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
- movq $0x8000000000000000, %rsi
- xorq %rsi, %rax
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_long: /* 0x81 */
-/* File: x86_64/op_int_to_long.S */
- /* int to long vA, vB */
- movzbq rINSTbl, %rax # rax <- +A
- sarl $4, %eax # eax <- B
- andb $0xf, rINSTbl # rINST <- A
- movslq VREG_ADDRESS(%rax), %rax
- SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_float: /* 0x82 */
-/* File: x86_64/op_int_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2ssl VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_double: /* 0x83 */
-/* File: x86_64/op_int_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2sdl VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_int: /* 0x84 */
-/* File: x86_64/op_long_to_int.S */
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-/* File: x86_64/op_move.S */
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $0xf, %al # eax <- A
- shrl $4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if 0
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_float: /* 0x85 */
-/* File: x86_64/op_long_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2ssq VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_long_to_double: /* 0x86 */
-/* File: x86_64/op_long_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsi2sdq VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_int: /* 0x87 */
-/* File: x86_64/op_float_to_int.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movss VREG_ADDRESS(rINSTq), %xmm0
- movl $0x7fffffff, %eax
- cvtsi2ssl %eax, %xmm1
- comiss %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttss2sil %xmm0, %eax
- jmp 1f
-2:
- xorl %eax, %eax
-1:
- .if 0
- SET_WIDE_VREG %eax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_long: /* 0x88 */
-/* File: x86_64/op_float_to_long.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movss VREG_ADDRESS(rINSTq), %xmm0
- movq $0x7fffffffffffffff, %rax
- cvtsi2ssq %rax, %xmm1
- comiss %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttss2siq %xmm0, %rax
- jmp 1f
-2:
- xorq %rax, %rax
-1:
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %rax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_float_to_double: /* 0x89 */
-/* File: x86_64/op_float_to_double.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtss2sd VREG_ADDRESS(rINSTq), %xmm0
- .if 1
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_int: /* 0x8a */
-/* File: x86_64/op_double_to_int.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movsd VREG_ADDRESS(rINSTq), %xmm0
- movl $0x7fffffff, %eax
- cvtsi2sdl %eax, %xmm1
- comisd %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttsd2sil %xmm0, %eax
- jmp 1f
-2:
- xorl %eax, %eax
-1:
- .if 0
- SET_WIDE_VREG %eax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_long: /* 0x8b */
-/* File: x86_64/op_double_to_long.S */
-/* File: x86_64/cvtfp_int.S */
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- movsd VREG_ADDRESS(rINSTq), %xmm0
- movq $0x7fffffffffffffff, %rax
- cvtsi2sdq %rax, %xmm1
- comisd %xmm1, %xmm0
- jae 1f
- jp 2f
- cvttsd2siq %xmm0, %rax
- jmp 1f
-2:
- xorq %rax, %rax
-1:
- .if 1
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %rax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_double_to_float: /* 0x8c */
-/* File: x86_64/op_double_to_float.S */
-/* File: x86_64/fpcvt.S */
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- cvtsd2ss VREG_ADDRESS(rINSTq), %xmm0
- .if 0
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_byte: /* 0x8d */
-/* File: x86_64/op_int_to_byte.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movsbl %al, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_char: /* 0x8e */
-/* File: x86_64/op_int_to_char.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movzwl %ax,%eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_int_to_short: /* 0x8f */
-/* File: x86_64/op_int_to_short.S */
-/* File: x86_64/unop.S */
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4,rINST # rINST <- B
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $0xf,%cl # ecx <- A
-
-movswl %ax, %eax
- .if 0
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int: /* 0x90 */
-/* File: x86_64/op_add_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- addl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int: /* 0x91 */
-/* File: x86_64/op_sub_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- subl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int: /* 0x92 */
-/* File: x86_64/op_mul_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- imull (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int: /* 0x93 */
-/* File: x86_64/op_div_int.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 0
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %ecx, %rcx # ecx <- vCC
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %eax, rINSTq # eax <- vBB
- .else
- SET_VREG %eax, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int: /* 0x94 */
-/* File: x86_64/op_rem_int.S */
-/* File: x86_64/bindiv.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 0
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %ecx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %ecx, %rcx # ecx <- vCC
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %edx, rINSTq # eax <- vBB
- .else
- SET_VREG %edx, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int: /* 0x95 */
-/* File: x86_64/op_and_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- andl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int: /* 0x96 */
-/* File: x86_64/op_or_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- orl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int: /* 0x97 */
-/* File: x86_64/op_xor_int.S */
-/* File: x86_64/binop.S */
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- xorl (rFP,%rcx,4), %eax # ex: addl (rFP,%rcx,4),%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int: /* 0x98 */
-/* File: x86_64/op_shl_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int: /* 0x99 */
-/* File: x86_64/op_shr_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int: /* 0x9a */
-/* File: x86_64/op_ushr_int.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 32-bit binary operation in which both operands are loaded into
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 0
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long: /* 0x9b */
-/* File: x86_64/op_add_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- addq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long: /* 0x9c */
-/* File: x86_64/op_sub_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- subq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long: /* 0x9d */
-/* File: x86_64/op_mul_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- imulq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long: /* 0x9e */
-/* File: x86_64/op_div_long.S */
-/* File: x86_64/bindiv.S */
-/*
- * 64-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 1
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %rcx, %rcx # ecx <- vCC
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rax, rINSTq # eax <- vBB
- .else
- SET_VREG %rax, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorq %rax, %rax
- .else
- negq %rax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long: /* 0x9f */
-/* File: x86_64/op_rem_long.S */
-/* File: x86_64/bindiv.S */
-/*
- * 64-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if 1
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG %rcx, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG %rcx, %rcx # ecx <- vCC
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rdx, rINSTq # eax <- vBB
- .else
- SET_VREG %rdx, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorq %rdx, %rdx
- .else
- negq %rdx
- .endif
- jmp 1b
-
-
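The je 2f path in the two long division handlers above exists because x86-64 idivq raises a divide-error fault for INT64_MIN / -1, while Dex semantics require the quotient to wrap and the remainder to be 0; a -1 divisor is therefore handled without ever executing idivq. A C++ sketch of the semantics those handlers implement (helper names are illustrative):

    #include <cstdint>
    #include <cstdio>

    // div-long / rem-long with the divisor == -1 path taken before idivq.
    static int64_t DivLong(int64_t num, int64_t den) {
      // den == 0 takes the common_errDivideByZero path in the real handler.
      if (den == -1) return num == INT64_MIN ? num : -num;  // negq: wraps on overflow
      return num / den;
    }

    static int64_t RemLong(int64_t num, int64_t den) {
      if (den == -1) return 0;  // xorq %rdx, %rdx
      return num % den;
    }

    int main() {
      std::printf("%lld %lld\n",
                  (long long)DivLong(INT64_MIN, -1),   // INT64_MIN (wrapped)
                  (long long)RemLong(INT64_MIN, -1));  // 0
      return 0;
    }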
-/* ------------------------------ */
- .balign 128
-.L_op_and_long: /* 0xa0 */
-/* File: x86_64/op_and_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- andq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long: /* 0xa1 */
-/* File: x86_64/op_or_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- orq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long: /* 0xa2 */
-/* File: x86_64/op_xor_long.S */
-/* File: x86_64/binopWide.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- xorq (rFP,%rcx,4), %rax # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long: /* 0xa3 */
-/* File: x86_64/op_shl_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 64-bit binary operation in which both operands are loaded into
- * registers (op0 in rax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- salq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- salq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long: /* 0xa4 */
-/* File: x86_64/op_shr_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 64-bit binary operation in which both operands are loaded into
- * registers (op0 in rax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- sarq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- sarq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long: /* 0xa5 */
-/* File: x86_64/op_ushr_long.S */
-/* File: x86_64/binop1.S */
-/*
- * Generic 64-bit binary operation in which both operands are loaded into
- * registers (op0 in rax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if 1
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- shrq %cl, %rax # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- shrq %cl, %rax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float: /* 0xa6 */
-/* File: x86_64/op_add_float.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- addss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
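This sseBinop.S pattern (reused by the add/sub/mul/div handlers for both float and double below) has one detail worth calling out: after storing the result into the destination vreg, the handler also zeroes that vreg's entry in the parallel reference array (the pxor/movss pair writing VREG_REF_ADDRESS), so a stale reference left in the slot is not still treated as live by the GC. A rough C++ sketch of the bookkeeping, with an invented frame layout (prim/refs are assumptions, not ART's shadow-frame definition):

    #include <cstdint>
    #include <cstring>

    struct FrameSketch {
      uint32_t prim[16];  // VREG_ADDRESS(...) slots
      uint32_t refs[16];  // VREG_REF_ADDRESS(...) slots (0 == no reference)
    };

    // add-float vAA, vBB, vCC, including the "clear ref" store.
    static void AddFloat(FrameSketch& f, int vAA, int vBB, int vCC) {
      float a, b, r;
      std::memcpy(&a, &f.prim[vBB], sizeof a);
      std::memcpy(&b, &f.prim[vCC], sizeof b);
      r = a + b;                                // addss
      std::memcpy(&f.prim[vAA], &r, sizeof r);  // movss to VREG_ADDRESS
      f.refs[vAA] = 0;                          // pxor + movss to VREG_REF_ADDRESS
    }

    int main() {
      FrameSketch f{};
      float x = 1.5f, y = 2.25f;
      std::memcpy(&f.prim[1], &x, sizeof x);
      std::memcpy(&f.prim[2], &y, sizeof y);
      AddFloat(f, 0, 1, 2);
      return 0;
    }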
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float: /* 0xa7 */
-/* File: x86_64/op_sub_float.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- subss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float: /* 0xa8 */
-/* File: x86_64/op_mul_float.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- mulss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float: /* 0xa9 */
-/* File: x86_64/op_div_float.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- divss VREG_ADDRESS(%rax), %xmm0
- movss %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float: /* 0xaa */
-/* File: x86_64/op_rem_float.S */
- /* rem_float vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- CC
- movzbq 2(rPC), %rax # eax <- BB
- flds VREG_ADDRESS(%rcx) # vCC (divisor) to fp stack
- flds VREG_ADDRESS(%rax) # vBB (dividend) to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINSTq) # %st to vAA
- CLEAR_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
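The 1:/fprem/fstsw/sahf/jp loop above is the standard x87 idiom for a full remainder: fprem only produces a partial remainder and sets the C2 status bit (which sahf maps onto the parity flag) while more iterations are needed. The final value matches C's fmod, i.e. a truncated-division remainder whose sign follows the dividend, which is what rem-float requires:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Same result the fprem loop leaves in st(0) for vBB = -7.5, vCC = 2.0.
      std::printf("%f\n", std::fmod(-7.5f, 2.0f));  // -1.5, sign of the dividend
      return 0;
    }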
-/* ------------------------------ */
- .balign 128
-.L_op_add_double: /* 0xab */
-/* File: x86_64/op_add_double.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- addsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double: /* 0xac */
-/* File: x86_64/op_sub_double.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- subsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double: /* 0xad */
-/* File: x86_64/op_mul_double.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- mulsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double: /* 0xae */
-/* File: x86_64/op_div_double.S */
-/* File: x86_64/sseBinop.S */
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- divsd VREG_ADDRESS(%rax), %xmm0
- movsd %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double: /* 0xaf */
-/* File: x86_64/op_rem_double.S */
- /* rem_double vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- CC
- movzbq 2(rPC), %rax # eax <- BB
- fldl VREG_ADDRESS(%rcx) # %st1 <- fp[vCC]
- fldl VREG_ADDRESS(%rax) # %st0 <- fp[vBB]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_2addr: /* 0xb0 */
-/* File: x86_64/op_add_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- addl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
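This and the following /2addr handlers decode both registers from a single byte of the first code unit: sarl $4 yields B, andb $0xf yields A, and the result is written back onto vA in place. A small C++ sketch of that decode (the example instruction word is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t inst = 0x21B0;            // add-int/2addr v1, v2 (opcode 0xb0, BA = 0x21)
      unsigned ba = inst >> 8;           // rINST holds this byte in the handler
      unsigned vA = ba & 0x0Fu;          // andb $0xf  -> 1
      unsigned vB = ba >> 4;             // sarl $4    -> 2
      std::printf("vA=%u vB=%u opcode=0x%02x\n", vA, vB, inst & 0xFFu);
      return 0;
    }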
-/* ------------------------------ */
- .balign 128
-.L_op_sub_int_2addr: /* 0xb1 */
-/* File: x86_64/op_sub_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- subl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_2addr: /* 0xb2 */
-/* File: x86_64/op_mul_int_2addr.S */
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, %rcx # eax <- vA
- imull (rFP,rINSTq,4), %eax
- SET_VREG %eax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_2addr: /* 0xb3 */
-/* File: x86_64/op_div_int_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %ecx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %ecx, %rcx # ecx <- vB
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %eax, rINSTq # vA <- result
- .else
- SET_VREG %eax, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_2addr: /* 0xb4 */
-/* File: x86_64/op_rem_int_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %ecx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %ecx, %rcx # ecx <- vB
- .endif
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rdx:rax <- sign-extended of rax
- idivl %ecx
-1:
- .if 0
- SET_WIDE_VREG %edx, rINSTq # vA <- result
- .else
- SET_VREG %edx, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_2addr: /* 0xb5 */
-/* File: x86_64/op_and_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- andl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_2addr: /* 0xb6 */
-/* File: x86_64/op_or_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- orl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_2addr: /* 0xb7 */
-/* File: x86_64/op_xor_int_2addr.S */
-/* File: x86_64/binop2addr.S */
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- xorl %eax, (rFP,%rcx,4) # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_2addr: /* 0xb8 */
-/* File: x86_64/op_shl_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sall %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_2addr: /* 0xb9 */
-/* File: x86_64/op_shr_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sarl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_2addr: /* 0xba */
-/* File: x86_64/op_ushr_int_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 0
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- shrl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_long_2addr: /* 0xbb */
-/* File: x86_64/op_add_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- addq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_long_2addr: /* 0xbc */
-/* File: x86_64/op_sub_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- subq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_long_2addr: /* 0xbd */
-/* File: x86_64/op_mul_long_2addr.S */
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, %rcx # rax <- vA
- imulq (rFP,rINSTq,4), %rax
- SET_WIDE_VREG %rax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_long_2addr: /* 0xbe */
-/* File: x86_64/op_div_long_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 64-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %rcx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %rcx, %rcx # ecx <- vB
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rax, rINSTq # vA <- result
- .else
- SET_VREG %rax, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 0
- xorq %rax, %rax
- .else
- negq %rax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_long_2addr: /* 0xbf */
-/* File: x86_64/op_rem_long_2addr.S */
-/* File: x86_64/bindiv2addr.S */
-/*
- * 64-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # rcx <- B
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG %rcx, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG %rcx, %rcx # ecx <- vB
- .endif
- testq %rcx, %rcx
- jz common_errDivideByZero
- cmpq $-1, %rcx
- je 2f
- cqo # rdx:rax <- sign-extended of rax
- idivq %rcx
-1:
- .if 1
- SET_WIDE_VREG %rdx, rINSTq # vA <- result
- .else
- SET_VREG %rdx, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if 1
- xorq %rdx, %rdx
- .else
- negq %rdx
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_long_2addr: /* 0xc0 */
-/* File: x86_64/op_and_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- andq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_long_2addr: /* 0xc1 */
-/* File: x86_64/op_or_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- orq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_long_2addr: /* 0xc2 */
-/* File: x86_64/op_xor_long_2addr.S */
-/* File: x86_64/binopWide2addr.S */
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $4, rINST # rINST <- B
- andb $0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- xorq %rax, (rFP,%rcx,4) # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_long_2addr: /* 0xc3 */
-/* File: x86_64/op_shl_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- salq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- salq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_long_2addr: /* 0xc4 */
-/* File: x86_64/op_shr_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- sarq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- sarq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86_64/op_ushr_long_2addr.S */
-/* File: x86_64/shop2addr.S */
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $0xf, rINSTbl # rINST <- A
- .if 1
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- shrq %cl, %rax # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- shrq %cl, %rax # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_float_2addr: /* 0xc6 */
-/* File: x86_64/op_add_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_float_2addr: /* 0xc7 */
-/* File: x86_64/op_sub_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_float_2addr: /* 0xc8 */
-/* File: x86_64/op_mul_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_float_2addr: /* 0xc9 */
-/* File: x86_64/op_div_float_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movss VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divss VREG_ADDRESS(rINSTq), %xmm0
- movss %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movss %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_float_2addr: /* 0xca */
-/* File: x86_64/op_rem_float_2addr.S */
- /* rem_float/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $4, rINST # rINST <- B
- flds VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_double_2addr: /* 0xcb */
-/* File: x86_64/op_add_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- addsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_sub_double_2addr: /* 0xcc */
-/* File: x86_64/op_sub_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- subsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_double_2addr: /* 0xcd */
-/* File: x86_64/op_mul_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- mulsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_double_2addr: /* 0xce */
-/* File: x86_64/op_div_double_2addr.S */
-/* File: x86_64/sseBinop2Addr.S */
- movl rINST, %ecx # ecx <- A+
- andl $0xf, %ecx # ecx <- A
- movsd VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $4, rINST # rINST<- B
- divsd VREG_ADDRESS(rINSTq), %xmm0
- movsd %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movsd %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_double_2addr: /* 0xcf */
-/* File: x86_64/op_rem_double_2addr.S */
- /* rem_double/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit16: /* 0xd0 */
-/* File: x86_64/op_add_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- addl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int: /* 0xd1 */
-/* File: x86_64/op_rsub_int.S */
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- subl %eax, %ecx # for example: addl %ecx, %eax
- SET_VREG %ecx, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
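The two lit16 handlers above pull A and B out of the first code unit and sign-extend the second code unit as the literal (the movswl). rsub-int is the one asymmetric case: it computes literal - vB rather than vB op literal, which is why its handler subtracts %eax from %ecx instead of the other way around. A decode sketch in C++ (the encoded example is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t units[2] = {0x31D1, 0xFFFE};  // rsub-int v1, v3, #-2
      unsigned vB = (units[0] >> 12) & 0xFu; // 3
      unsigned vA = (units[0] >> 8) & 0xFu;  // 1
      int lit = (int16_t)units[1];           // movswl: sign-extend CCCC -> -2
      int vreg[16] = {0};
      vreg[vB] = 5;
      vreg[vA] = lit - vreg[vB];             // subl %eax, %ecx in the handler
      std::printf("v%u = %d\n", vA, vreg[vA]);  // v1 = -7
      return 0;
    }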
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit16: /* 0xd2 */
-/* File: x86_64/op_mul_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- imull %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit16: /* 0xd3 */
-/* File: x86_64/op_div_int_lit16.S */
-/* File: x86_64/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %eax, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit16: /* 0xd4 */
-/* File: x86_64/op_rem_int_lit16.S */
-/* File: x86_64/bindivLit16.S */
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %edx, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit16: /* 0xd5 */
-/* File: x86_64/op_and_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit16: /* 0xd6 */
-/* File: x86_64/op_or_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- orl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit16: /* 0xd7 */
-/* File: x86_64/op_xor_int_lit16.S */
-/* File: x86_64/binopLit16.S */
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- xorl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_add_int_lit8: /* 0xd8 */
-/* File: x86_64/op_add_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- addl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
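For the lit8 forms, the destination vAA sits in the high byte of the first code unit (already in rINST), while BB and the signed 8-bit literal CC come from the second code unit. One subtlety for the shift variants further down: x86 shifts mask the count in %cl to 5 bits for 32-bit operands, which matches the Dex rule that only the low five bits of the shift distance are used. A decode sketch (the example encoding is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint16_t units[2] = {0x05D8, 0xFF02};     // add-int/lit8 v5, v2, #-1
      unsigned vAA = units[0] >> 8;             // 5 (kept in rINST)
      unsigned vBB = units[1] & 0xFFu;          // 2 (movzbq 2(rPC))
      int lit = (int8_t)(units[1] >> 8);        // -1 (movsbl 3(rPC))
      std::printf("vAA=%u vBB=%u lit=%d shl-count=%d\n",
                  vAA, vBB, lit, 33 & 0x1F);    // a count of 33 shifts by 1
      return 0;
    }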
-/* ------------------------------ */
- .balign 128
-.L_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86_64/op_rsub_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- subl %eax, %ecx # ex: addl %ecx,%eax
- SET_VREG %ecx, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_mul_int_lit8: /* 0xda */
-/* File: x86_64/op_mul_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- imull %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_div_int_lit8: /* 0xdb */
-/* File: x86_64/op_div_int_lit8.S */
-/* File: x86_64/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %eax, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 0
- xorl %eax, %eax
- .else
- negl %eax
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_rem_int_lit8: /* 0xdc */
-/* File: x86_64/op_rem_int_lit8.S */
-/* File: x86_64/bindivLit8.S */
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG %edx, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if 1
- xorl %edx, %edx
- .else
- negl %edx
- .endif
- jmp 1b
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_and_int_lit8: /* 0xdd */
-/* File: x86_64/op_and_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- andl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_or_int_lit8: /* 0xde */
-/* File: x86_64/op_or_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- orl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_xor_int_lit8: /* 0xdf */
-/* File: x86_64/op_xor_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- xorl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shl_int_lit8: /* 0xe0 */
-/* File: x86_64/op_shl_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_shr_int_lit8: /* 0xe1 */
-/* File: x86_64/op_shr_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86_64/op_ushr_int_lit8.S */
-/* File: x86_64/binopLit8.S */
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_quick: /* 0xe3 */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
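The -quick field handlers rely on the instruction having been quickened so that offset@CCCC is already a raw byte offset into the object; the handler only has to null-check vB and load from object + offset. A C++ sketch of that access under an invented object layout (Obj and its field are assumptions for illustration):

    #include <cstdint>
    #include <cstddef>
    #include <cstring>
    #include <cstdio>

    struct Obj { uint32_t klass_ref; int32_t field; };  // invented layout

    static int32_t IGetQuick(const Obj* obj, uint16_t byte_offset) {
      if (obj == nullptr) return 0;  // real handler jumps to common_errNullObject
      const uint8_t* raw = reinterpret_cast<const uint8_t*>(obj);
      int32_t value;
      std::memcpy(&value, raw + byte_offset, sizeof value);
      return value;
    }

    int main() {
      Obj o{0, 42};
      std::printf("%d\n", IGetQuick(&o, offsetof(Obj, field)));  // 42
      return 0;
    }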
-/* ------------------------------ */
- .balign 128
-.L_op_iget_wide_quick: /* 0xe4 */
-/* File: x86_64/op_iget_wide_quick.S */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 1
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movswl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_object_quick: /* 0xe5 */
-/* File: x86_64/op_iget_object_quick.S */
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- .extern artIGetObjectFromMterp
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
- movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
- EXPORT_PC
- callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $0xf, rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
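op_iget_object_quick is one of the handlers that calls into C++ (artIGetObjectFromMterp) and then tests the thread's pending-exception slot before trusting the result: a non-null value at THREAD_EXCEPTION_OFFSET means the helper threw, and the handler bails to MterpException. A loose sketch of that contract (ThreadSketch is an invented stand-in, not ART's Thread):

    #include <cstdio>

    struct ThreadSketch { void* pending_exception = nullptr; };  // invented

    static bool HasPendingException(const ThreadSketch& self) {
      // cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx) ; jnz MterpException
      return self.pending_exception != nullptr;
    }

    int main() {
      ThreadSketch self;
      std::printf("%d\n", HasPendingException(self));  // 0: keep interpreting
      return 0;
    }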
-/* ------------------------------ */
- .balign 128
-.L_op_iput_quick: /* 0xe6 */
-/* File: x86_64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movl rINST, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_wide_quick: /* 0xe7 */
-/* File: x86_64/op_iput_wide_quick.S */
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $4, %ecx # ecx<- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwq 2(rPC), %rax # rax<- field byte offset
- leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
- andb $0xf, rINSTbl # rINST<- A
- GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
- movq %rax, (%rcx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_object_quick: /* 0xe8 */
-/* File: x86_64/op_iput_object_quick.S */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST 232
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
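Note the asymmetry with the neighboring handlers: the primitive iput-*-quick variants store straight into the object, but iput-object-quick goes through MterpIputObjectQuick in C++, where the reference store can take the GC write barrier (dirtying the card that covers the holding object) and any other checks the runtime needs. A toy sketch of that extra step, with an invented card size and table (not ART's real card-table code):

    #include <cstdint>

    constexpr unsigned kCardShift = 10;          // assumption: 1 KiB cards
    static uint8_t card_table[1024];             // toy table for the sketch

    static void StoreReferenceField(void** slot, void* value) {
      *slot = value;                                             // the raw store
      uintptr_t card = reinterpret_cast<uintptr_t>(slot) >> kCardShift;
      card_table[card % (sizeof card_table)] = 1;                // mark card dirty
    }

    int main() {
      void* field = nullptr;
      static int referent = 7;
      StoreReferenceField(&field, &referent);
      return field == &referent ? 0 : 1;
    }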
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86_64/op_invoke_virtual_quick.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuick
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 233
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86_64/op_invoke_virtual_range_quick.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeVirtualQuickRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 234
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeVirtualQuickRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_boolean_quick: /* 0xeb */
-/* File: x86_64/op_iput_boolean_quick.S */
-/* File: x86_64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movb rINSTbl, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_byte_quick: /* 0xec */
-/* File: x86_64/op_iput_byte_quick.S */
-/* File: x86_64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movb rINSTbl, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_char_quick: /* 0xed */
-/* File: x86_64/op_iput_char_quick.S */
-/* File: x86_64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movw rINSTw, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iput_short_quick: /* 0xee */
-/* File: x86_64/op_iput_short_quick.S */
-/* File: x86_64/op_iput_quick.S */
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- movw rINSTw, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_boolean_quick: /* 0xef */
-/* File: x86_64/op_iget_boolean_quick.S */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movsbl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_byte_quick: /* 0xf0 */
-/* File: x86_64/op_iget_byte_quick.S */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movsbl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_char_quick: /* 0xf1 */
-/* File: x86_64/op_iget_char_quick.S */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # rax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movzwl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_iget_short_quick: /* 0xf2 */
-/* File: x86_64/op_iget_short_quick.S */
-/* File: x86_64/op_iget_quick.S */
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # rax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $0xf,rINSTbl # rINST <- A
- .if 0
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- movswl (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f3: /* 0xf3 */
-/* File: x86_64/op_unused_f3.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f4: /* 0xf4 */
-/* File: x86_64/op_unused_f4.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f5: /* 0xf5 */
-/* File: x86_64/op_unused_f5.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f6: /* 0xf6 */
-/* File: x86_64/op_unused_f6.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f7: /* 0xf7 */
-/* File: x86_64/op_unused_f7.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f8: /* 0xf8 */
-/* File: x86_64/op_unused_f8.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_unused_f9: /* 0xf9 */
-/* File: x86_64/op_unused_f9.S */
-/* File: x86_64/unused.S */
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic: /* 0xfa */
-/* File: x86_64/op_invoke_polymorphic.S */
-/* File: x86_64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphic
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 250
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokePolymorphic)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86_64/op_invoke_polymorphic_range.S */
-/* File: x86_64/invoke_polymorphic.S */
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern MterpInvokePolymorphicRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 251
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokePolymorphicRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom: /* 0xfc */
-/* File: x86_64/op_invoke_custom.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustom
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 252
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeCustom)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_invoke_custom_range: /* 0xfd */
-/* File: x86_64/op_invoke_custom_range.S */
-/* File: x86_64/invoke.S */
-/*
- * Generic invoke handler wrapper.
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
- .extern MterpInvokeCustomRange
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST 253
- movl rINST, OUT_32_ARG3
- call SYMBOL(MterpInvokeCustomRange)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 3
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_handle: /* 0xfe */
-/* File: x86_64/op_const_method_handle.S */
-/* File: x86_64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstMethodHandle
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB (method handle index)
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstMethodHandle) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
-/* ------------------------------ */
- .balign 128
-.L_op_const_method_type: /* 0xff */
-/* File: x86_64/op_const_method_type.S */
-/* File: x86_64/const.S */
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@BBBB */
- .extern MterpConstMethodType
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB (proto index)
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstMethodType) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-
- .balign 128
-/* File: x86_64/instruction_end.S */
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
-
-
-/*
- * ===========================================================================
- * Sister implementations
- * ===========================================================================
- */
-/* File: x86_64/instruction_start_sister.S */
-
- OBJECT_TYPE(artMterpAsmSisterStart)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
- .global SYMBOL(artMterpAsmSisterStart)
- .text
- .balign 4
-SYMBOL(artMterpAsmSisterStart):
-
-/* File: x86_64/instruction_end_sister.S */
-
- OBJECT_TYPE(artMterpAsmSisterEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
- .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
-
-/* File: x86_64/instruction_start_alt.S */
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_nop: /* 0x00 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(0*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move: /* 0x01 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(1*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_from16: /* 0x02 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(2*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_16: /* 0x03 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(3*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide: /* 0x04 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(4*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_from16: /* 0x05 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(5*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_wide_16: /* 0x06 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(6*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object: /* 0x07 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(7*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_from16: /* 0x08 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(8*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_object_16: /* 0x09 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(9*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result: /* 0x0a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(10*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_wide: /* 0x0b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(11*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_result_object: /* 0x0c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(12*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_move_exception: /* 0x0d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(13*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void: /* 0x0e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(14*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return: /* 0x0f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(15*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_wide: /* 0x10 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(16*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_object: /* 0x11 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(17*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_4: /* 0x12 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(18*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_16: /* 0x13 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(19*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const: /* 0x14 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(20*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_high16: /* 0x15 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(21*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_16: /* 0x16 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(22*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_32: /* 0x17 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(23*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide: /* 0x18 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(24*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_wide_high16: /* 0x19 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(25*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string: /* 0x1a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(26*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_string_jumbo: /* 0x1b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(27*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_class: /* 0x1c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(28*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_enter: /* 0x1d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(29*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_monitor_exit: /* 0x1e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(30*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_check_cast: /* 0x1f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(31*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_instance_of: /* 0x20 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(32*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_array_length: /* 0x21 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(33*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_instance: /* 0x22 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(34*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_new_array: /* 0x23 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(35*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array: /* 0x24 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(36*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_filled_new_array_range: /* 0x25 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(37*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_fill_array_data: /* 0x26 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(38*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_throw: /* 0x27 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(39*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto: /* 0x28 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(40*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_16: /* 0x29 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(41*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_goto_32: /* 0x2a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(42*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_packed_switch: /* 0x2b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(43*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sparse_switch: /* 0x2c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(44*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_float: /* 0x2d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(45*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_float: /* 0x2e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(46*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpl_double: /* 0x2f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(47*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmpg_double: /* 0x30 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(48*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_cmp_long: /* 0x31 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(49*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eq: /* 0x32 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(50*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ne: /* 0x33 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(51*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lt: /* 0x34 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(52*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ge: /* 0x35 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(53*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gt: /* 0x36 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(54*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_le: /* 0x37 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(55*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_eqz: /* 0x38 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(56*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_nez: /* 0x39 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(57*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_ltz: /* 0x3a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(58*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gez: /* 0x3b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(59*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_gtz: /* 0x3c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(60*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_if_lez: /* 0x3d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(61*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3e: /* 0x3e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(62*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_3f: /* 0x3f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(63*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_40: /* 0x40 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(64*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_41: /* 0x41 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(65*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_42: /* 0x42 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(66*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_43: /* 0x43 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(67*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget: /* 0x44 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(68*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_wide: /* 0x45 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(69*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_object: /* 0x46 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(70*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_boolean: /* 0x47 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(71*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_byte: /* 0x48 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(72*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_char: /* 0x49 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(73*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aget_short: /* 0x4a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(74*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput: /* 0x4b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(75*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_wide: /* 0x4c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(76*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_object: /* 0x4d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(77*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_boolean: /* 0x4e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(78*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_byte: /* 0x4f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(79*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_char: /* 0x50 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(80*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_aput_short: /* 0x51 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(81*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget: /* 0x52 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(82*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide: /* 0x53 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(83*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object: /* 0x54 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(84*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean: /* 0x55 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(85*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte: /* 0x56 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(86*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char: /* 0x57 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(87*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short: /* 0x58 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(88*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput: /* 0x59 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(89*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide: /* 0x5a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(90*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object: /* 0x5b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(91*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean: /* 0x5c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(92*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte: /* 0x5d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(93*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char: /* 0x5e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(94*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short: /* 0x5f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(95*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget: /* 0x60 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(96*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_wide: /* 0x61 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(97*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_object: /* 0x62 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(98*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_boolean: /* 0x63 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(99*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_byte: /* 0x64 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(100*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_char: /* 0x65 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(101*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sget_short: /* 0x66 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(102*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput: /* 0x67 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(103*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_wide: /* 0x68 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(104*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_object: /* 0x69 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(105*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_boolean: /* 0x6a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(106*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_byte: /* 0x6b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(107*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_char: /* 0x6c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(108*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sput_short: /* 0x6d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(109*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual: /* 0x6e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(110*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super: /* 0x6f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(111*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct: /* 0x70 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(112*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static: /* 0x71 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(113*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface: /* 0x72 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(114*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_return_void_no_barrier: /* 0x73 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(115*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range: /* 0x74 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(116*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_super_range: /* 0x75 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(117*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_direct_range: /* 0x76 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(118*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_static_range: /* 0x77 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(119*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_interface_range: /* 0x78 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(120*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_79: /* 0x79 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(121*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_7a: /* 0x7a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(122*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_int: /* 0x7b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(123*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_int: /* 0x7c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(124*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_long: /* 0x7d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(125*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_not_long: /* 0x7e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(126*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_float: /* 0x7f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(127*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_neg_double: /* 0x80 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(128*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_long: /* 0x81 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(129*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_float: /* 0x82 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(130*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_double: /* 0x83 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(131*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_int: /* 0x84 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(132*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_float: /* 0x85 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(133*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_long_to_double: /* 0x86 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(134*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_int: /* 0x87 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(135*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_long: /* 0x88 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(136*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_float_to_double: /* 0x89 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(137*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_int: /* 0x8a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(138*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_long: /* 0x8b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(139*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_double_to_float: /* 0x8c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(140*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_byte: /* 0x8d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(141*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_char: /* 0x8e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(142*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_int_to_short: /* 0x8f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(143*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int: /* 0x90 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(144*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int: /* 0x91 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(145*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int: /* 0x92 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(146*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int: /* 0x93 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(147*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int: /* 0x94 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(148*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int: /* 0x95 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(149*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int: /* 0x96 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(150*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int: /* 0x97 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(151*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int: /* 0x98 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(152*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int: /* 0x99 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(153*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int: /* 0x9a */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(154*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long: /* 0x9b */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(155*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long: /* 0x9c */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(156*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long: /* 0x9d */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(157*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long: /* 0x9e */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(158*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long: /* 0x9f */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(159*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long: /* 0xa0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(160*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long: /* 0xa1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(161*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long: /* 0xa2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(162*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long: /* 0xa3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(163*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long: /* 0xa4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(164*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long: /* 0xa5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(165*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float: /* 0xa6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(166*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float: /* 0xa7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(167*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float: /* 0xa8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(168*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float: /* 0xa9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(169*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float: /* 0xaa */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(170*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double: /* 0xab */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(171*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double: /* 0xac */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(172*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double: /* 0xad */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(173*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double: /* 0xae */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(174*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double: /* 0xaf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(175*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_2addr: /* 0xb0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(176*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_int_2addr: /* 0xb1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(177*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_2addr: /* 0xb2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(178*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_2addr: /* 0xb3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(179*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_2addr: /* 0xb4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(180*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_2addr: /* 0xb5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(181*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_2addr: /* 0xb6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(182*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_2addr: /* 0xb7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(183*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_2addr: /* 0xb8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(184*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_2addr: /* 0xb9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(185*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_2addr: /* 0xba */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(186*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_long_2addr: /* 0xbb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(187*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_long_2addr: /* 0xbc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(188*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_long_2addr: /* 0xbd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(189*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_long_2addr: /* 0xbe */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(190*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_long_2addr: /* 0xbf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(191*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_long_2addr: /* 0xc0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(192*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_long_2addr: /* 0xc1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(193*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_long_2addr: /* 0xc2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(194*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_long_2addr: /* 0xc3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(195*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_long_2addr: /* 0xc4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(196*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_long_2addr: /* 0xc5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(197*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_float_2addr: /* 0xc6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(198*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_float_2addr: /* 0xc7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(199*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_float_2addr: /* 0xc8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(200*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_float_2addr: /* 0xc9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(201*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_float_2addr: /* 0xca */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(202*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_double_2addr: /* 0xcb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(203*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_sub_double_2addr: /* 0xcc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(204*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_double_2addr: /* 0xcd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(205*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_double_2addr: /* 0xce */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(206*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_double_2addr: /* 0xcf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(207*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit16: /* 0xd0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(208*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int: /* 0xd1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(209*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit16: /* 0xd2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(210*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit16: /* 0xd3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(211*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit16: /* 0xd4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(212*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit16: /* 0xd5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(213*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit16: /* 0xd6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(214*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit16: /* 0xd7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(215*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_add_int_lit8: /* 0xd8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(216*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rsub_int_lit8: /* 0xd9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(217*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_mul_int_lit8: /* 0xda */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(218*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_div_int_lit8: /* 0xdb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(219*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_rem_int_lit8: /* 0xdc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(220*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_and_int_lit8: /* 0xdd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(221*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_or_int_lit8: /* 0xde */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(222*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_xor_int_lit8: /* 0xdf */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(223*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shl_int_lit8: /* 0xe0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(224*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_shr_int_lit8: /* 0xe1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(225*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_ushr_int_lit8: /* 0xe2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(226*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_quick: /* 0xe3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(227*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_wide_quick: /* 0xe4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(228*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_object_quick: /* 0xe5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(229*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_quick: /* 0xe6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(230*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_wide_quick: /* 0xe7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(231*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_object_quick: /* 0xe8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(232*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_quick: /* 0xe9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(233*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_virtual_range_quick: /* 0xea */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(234*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_boolean_quick: /* 0xeb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(235*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_byte_quick: /* 0xec */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(236*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_char_quick: /* 0xed */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(237*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iput_short_quick: /* 0xee */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(238*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_boolean_quick: /* 0xef */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(239*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_byte_quick: /* 0xf0 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(240*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_char_quick: /* 0xf1 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(241*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_iget_short_quick: /* 0xf2 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(242*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f3: /* 0xf3 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(243*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f4: /* 0xf4 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(244*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f5: /* 0xf5 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(245*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f6: /* 0xf6 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(246*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f7: /* 0xf7 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(247*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f8: /* 0xf8 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(248*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_unused_f9: /* 0xf9 */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(249*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic: /* 0xfa */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(250*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_polymorphic_range: /* 0xfb */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(251*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom: /* 0xfc */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(252*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_invoke_custom_range: /* 0xfd */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(253*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_handle: /* 0xfe */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(254*128)
-
-/* ------------------------------ */
- .balign 128
-.L_ALT_op_const_method_type: /* 0xff */
-/* File: x86_64/alt_stub.S */
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(255*128)
-
- .balign 128
-/* File: x86_64/instruction_end_alt.S */
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
-
-/* File: x86_64/footer.S */
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movq rSELF, %rcx
- cmpq $0, THREAD_EXCEPTION_OFFSET(%rcx)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
- mov OFF_FP_DEX_PC(rFP), %ecx
- leaq (%rax, %rcx, 2), rPC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpl $JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decl rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movq rSELF, %rax
- testl $(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
- REFRESH_IBASE_REG %rax
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpl $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpl $JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movswl %ax, rPROFILE
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl $2, OUT_32_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpLogOSR)
-#endif
- movl $1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xorl %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $1, %eax
- jmp MterpDone
-MterpReturn:
- movq OFF_FP_RESULT_REGISTER(rFP), %rdx
- movq %rax, (%rdx)
- movl $1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- testl rPROFILE, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addq $FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %r15
- POP %r14
- POP %r13
- POP %r12
- POP %rbp
- POP %rbx
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
-
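The branch-handling comments in the deleted footer above reduce to a short decision tree over the rPROFILE hotness countdown. The following C sketch is purely illustrative: the helper and its refill value are invented stand-ins for MterpAddHotnessBatch, and the OSR/suspend actions are only noted in comments.

    #include <stdint.h>

    enum { JIT_CHECK_OSR = -1 };            /* sentinel value tested by the assembly */

    /* Invented stand-in for MterpAddHotnessBatch: yields the next countdown value. */
    static int32_t report_hotness_batch(void) { return 4096; }

    /* Rough sketch of MterpCommonTakenBranch; returns the updated rPROFILE value. */
    static int32_t taken_branch_profile(int32_t branch_offset, int32_t profile) {
        if (branch_offset > 0)              /* forward branches are never counted, */
            return profile;                 /* only an OSR re-entry check is done  */
        if (profile == JIT_CHECK_OSR)       /* backward branch while in OSR mode:  */
            return profile;                 /* MterpMaybeDoOnStackReplacement path */
        if (--profile == 0)                 /* hotness countdown reached zero      */
            profile = report_hotness_batch();
        return profile;                     /* caller then polls for a suspend request */
    }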
diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh
deleted file mode 100755
index ca3dcd9..0000000
--- a/runtime/interpreter/mterp/rebuild.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Rebuild for all known targets. Necessary until the stuff in "out" gets
-# generated as part of the build.
-#
-set -e
-
-for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done
diff --git a/runtime/interpreter/mterp/x86/alt_stub.S b/runtime/interpreter/mterp/x86/alt_stub.S
deleted file mode 100644
index a5b39b8..0000000
--- a/runtime/interpreter/mterp/x86/alt_stub.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- REFRESH_IBASE
- jmp .L_op_nop+(${opnum}*${handler_size_bytes})
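The alt stubs above (and their x86_64 counterparts deleted earlier) always end by jumping to `.L_op_nop+(opnum*handler_size_bytes)`. That works because every handler is padded to a fixed slot with `.balign 128`, so the real handler for opcode N sits at a computable offset from the table base. A minimal C sketch of that address computation, with invented names, assuming the 128-byte slot size these stubs use:

    #include <stdint.h>

    /* Illustrative only: handler i lives at base + i * slot_size, which is what
     * the ".L_op_nop+(opnum*128)" expressions in the stubs encode. */
    static uintptr_t nth_handler(uintptr_t handler_table_base, unsigned opcode) {
        const uintptr_t kHandlerSizeBytes = 128;   /* each slot is .balign 128 */
        return handler_table_base + (uintptr_t)opcode * kHandlerSizeBytes;
    }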
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86/arithmetic.S
new file mode 100644
index 0000000..3b5f0be
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/arithmetic.S
@@ -0,0 +1,943 @@
+%def bindiv(result="", special="", rem=""):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ mov rIBASE, LOCAL0(%esp)
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ movl %eax, %edx
+ orl %ecx, %edx
+ testl $$0xFFFFFF00, %edx # If both arguments are less
+ # than 8-bit and +ve
+ jz .L${opcode}_8 # Do 8-bit divide
+ testl $$0xFFFF0000, %edx # If both arguments are less
+ # than 16-bit and +ve
+ jz .L${opcode}_16 # Do 16-bit divide
+ cmpl $$-1, %ecx
+ jne .L${opcode}_32
+ cmpl $$0x80000000, %eax
+ jne .L${opcode}_32
+ movl $special, $result
+ jmp .L${opcode}_finish
+% add_helper(lambda: bindiv_helper(result, rem))
+
+%def bindiv_helper(result, rem):
+.L${opcode}_32:
+ cltd
+ idivl %ecx
+ jmp .L${opcode}_finish
+.L${opcode}_8:
+ div %cl # 8-bit divide otherwise.
+ # Remainder in %ah, quotient in %al
+ .if $rem
+ movl %eax, %edx
+ shr $$8, %edx
+ .else
+ andl $$0x000000FF, %eax
+ .endif
+ jmp .L${opcode}_finish
+.L${opcode}_16:
+ xorl %edx, %edx # Clear %edx before divide
+ div %cx
+.L${opcode}_finish:
+ SET_VREG $result, rINST
+ mov LOCAL0(%esp), rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
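The min-int special case called out in the bindiv comment exists because x86's `idivl` raises a divide error when INT_MIN is divided by -1, while the Dalvik semantics require the quotient to wrap to INT_MIN (and the remainder to be 0), which is what the `$special` operand supplies. A minimal C sketch of the intended result, with an invented helper name; divide-by-zero is assumed to have been rejected already, as common_errDivideByZero does in the template.

    #include <stdint.h>

    /* Illustrative only: want_rem = 0 gives the div-int result, 1 the rem-int result. */
    static int32_t div_or_rem32(int32_t num, int32_t den, int want_rem) {
        if (num == INT32_MIN && den == -1) {
            return want_rem ? 0 : INT32_MIN;   /* idivl would fault on this pair */
        }
        return want_rem ? num % den : num / den;
    }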
+%def bindiv2addr(result="", special=""):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movzx rINSTbl, %ecx # ecx <- BA
+ mov rIBASE, LOCAL0(%esp)
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # ecx <- vB
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, rINST # eax <- vA
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $$-1, %ecx
+ jne .L${opcode}_continue_div2addr
+ cmpl $$0x80000000, %eax
+ jne .L${opcode}_continue_div2addr
+ movl $special, $result
+ SET_VREG $result, rINST
+ mov LOCAL0(%esp), rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+% add_helper(lambda: bindiv2addr_helper(result))
+
+%def bindiv2addr_helper(result):
+.L${opcode}_continue_div2addr:
+ cltd
+ idivl %ecx
+ SET_VREG $result, rINST
+ mov LOCAL0(%esp), rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def bindivLit16(result="", special=""):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op0=minint and
+ * op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movzbl rINSTbl, %eax # eax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %eax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $$0xf, rINSTbl # rINST <- A
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $$-1, %ecx
+ jne .L${opcode}_continue_div
+ cmpl $$0x80000000, %eax
+ jne .L${opcode}_continue_div
+ movl $special, %eax
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_continue_div:
+ mov rIBASE, LOCAL0(%esp)
+ cltd
+ idivl %ecx
+ SET_VREG $result, rINST
+ mov LOCAL0(%esp), rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def bindivLit8(result="", special=""):
+/*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %eax # eax <- vBB
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $$0x80000000, %eax
+ jne .L${opcode}_continue_div
+ cmpl $$-1, %ecx
+ jne .L${opcode}_continue_div
+ movl $special, %eax
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_continue_div:
+ mov rIBASE, LOCAL0(%esp)
+ cltd
+ idivl %ecx
+ SET_VREG $result, rINST
+ mov LOCAL0(%esp), rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop(result="%eax", instr=""):
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op VREG_ADDRESS(%ecx)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB
+ $instr # ex: addl VREG_ADDRESS(%ecx),%eax
+ SET_VREG $result, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop1(result="%eax", tmp="%ecx", instr=""):
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC),%eax # eax <- BB
+ movzbl 3(rPC),%ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ $instr # ex: addl %ecx,%eax
+ SET_VREG $result, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movzx rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ GET_VREG %eax, rINST # eax <- vB
+ andb $$0xf, %cl # ecx <- A
+ $instr # for ex: addl %eax,VREG_ADDRESS(%ecx)
+ CLEAR_REF %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def binopLit16(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movzbl rINSTbl, %eax # eax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %eax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $$0xf, rINSTbl # rINST <- A
+ $instr # for example: addl %ecx, %eax
+ SET_VREG $result, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopLit8(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %eax # eax <- vBB
+ $instr # ex: addl %ecx,%eax
+ SET_VREG $result, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide(instr1="", instr2=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
+ $instr1 # ex: addl VREG_ADDRESS(%ecx),rIBASE
+ $instr2 # ex: adcl VREG_HIGH_ADDRESS(%ecx),%eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
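binopWide pairs an `addl`-style instruction on the low words with an `adcl`-style instruction on the high words (see the add-long instantiations further down). Expressed on 32-bit halves in C, purely as an illustration:

    #include <stdint.h>

    /* Illustrative only: 64-bit add built from 32-bit halves, mirroring addl/adcl. */
    static void add64(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                      uint32_t *r_lo, uint32_t *r_hi) {
        *r_lo = a_lo + b_lo;
        uint32_t carry = (*r_lo < a_lo);   /* carry out of the low-word addition */
        *r_hi = a_hi + b_hi + carry;       /* adcl folds this carry in           */
    }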
+%def binopWide2addr(instr1="", instr2=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx<- v[B+1]
+ andb $$0xF, rINSTbl # rINST<- A
+ $instr1 # ex: addl %eax,(rFP,rINST,4)
+ $instr2 # ex: adcl %ecx,4(rFP,rINST,4)
+ CLEAR_WIDE_REF rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def cvtfp_int(srcdouble="1", tgtlong="1"):
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate. This model
+ * differs from what is delivered normally via the x86 fpu, so we have
+ * to play some games.
+ */
+ /* float/double to int/long vA, vB */
+ movzbl rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ .if $srcdouble
+ fldl VREG_ADDRESS(rINST) # %st0 <- vB
+ .else
+ flds VREG_ADDRESS(rINST) # %st0 <- vB
+ .endif
+ ftst
+ fnstcw LOCAL0(%esp) # remember original rounding mode
+ movzwl LOCAL0(%esp), %eax
+ movb $$0xc, %ah
+ movw %ax, LOCAL0+2(%esp)
+ fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
+ andb $$0xf, %cl # ecx <- A
+ .if $tgtlong
+ fistpll VREG_ADDRESS(%ecx) # convert and store
+ .else
+ fistpl VREG_ADDRESS(%ecx) # convert and store
+ .endif
+ fldcw LOCAL0(%esp) # restore previous rounding mode
+ .if $tgtlong
+ movl $$0x80000000, %eax
+ xorl VREG_HIGH_ADDRESS(%ecx), %eax
+ orl VREG_ADDRESS(%ecx), %eax
+ .else
+ cmpl $$0x80000000, VREG_ADDRESS(%ecx)
+ .endif
+ je .L${opcode}_special_case # fix up result
+
+.L${opcode}_finish:
+ xor %eax, %eax
+ mov %eax, VREG_REF_ADDRESS(%ecx)
+ .if $tgtlong
+ mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+% add_helper(lambda: cvtfp_int_helper(tgtlong))
+
+%def cvtfp_int_helper(tgtlong):
+.L${opcode}_special_case:
+ fnstsw %ax
+ sahf
+ jp .L${opcode}_isNaN
+ adcl $$-1, VREG_ADDRESS(%ecx)
+ .if $tgtlong
+ adcl $$-1, VREG_HIGH_ADDRESS(%ecx)
+ .endif
+ jmp .L${opcode}_finish
+.L${opcode}_isNaN:
+ movl $$0, VREG_ADDRESS(%ecx)
+ .if $tgtlong
+ movl $$0, VREG_HIGH_ADDRESS(%ecx)
+ .endif
+ jmp .L${opcode}_finish
+
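The rules spelled out in the cvtfp_int comment (truncate toward zero, clamp out-of-range values to the extreme integer, map NaN to zero) are easier to read in C. This sketch covers only the float-to-int case and the function name is invented; the template gets the same effect by forcing the x87 rounding mode to truncate and then fixing up the 0x80000000 value the FPU produces for NaN and out-of-range inputs.

    #include <stdint.h>

    /* Illustrative only: Java-style float -> int conversion. */
    static int32_t java_f2i(float f) {
        if (f != f) return 0;                        /* NaN maps to zero         */
        if (f >= 2147483648.0f) return INT32_MAX;    /* >= 2^31 clamps to maxint */
        if (f < -2147483648.0f) return INT32_MIN;    /* < -2^31 clamps to minint */
        return (int32_t)f;                           /* C casts truncate to zero */
    }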
+%def shop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movzx rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # eax <- vBB
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, rINST # eax <- vAA
+ $instr # ex: sarl %cl, %eax
+ SET_VREG $result, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def unop(instr=""):
+/*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movzbl rINSTbl,%ecx # ecx <- A+
+ sarl $$4,rINST # rINST <- B
+ GET_VREG %eax, rINST # eax <- vB
+ andb $$0xf,%cl # ecx <- A
+ $instr
+ SET_VREG %eax, %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_int():
+% binop(instr="addl VREG_ADDRESS(%ecx), %eax")
+
+%def op_add_int_2addr():
+% binop2addr(instr="addl %eax, VREG_ADDRESS(%ecx)")
+
+%def op_add_int_lit16():
+% binopLit16(instr="addl %ecx, %eax")
+
+%def op_add_int_lit8():
+% binopLit8(instr="addl %ecx, %eax")
+
+%def op_add_long():
+% binopWide(instr1="addl VREG_ADDRESS(%ecx), rIBASE", instr2="adcl VREG_HIGH_ADDRESS(%ecx), %eax")
+
+%def op_add_long_2addr():
+% binopWide2addr(instr1="addl %eax, (rFP,rINST,4)", instr2="adcl %ecx, 4(rFP,rINST,4)")
+
+%def op_and_int():
+% binop(instr="andl VREG_ADDRESS(%ecx), %eax")
+
+%def op_and_int_2addr():
+% binop2addr(instr="andl %eax, VREG_ADDRESS(%ecx)")
+
+%def op_and_int_lit16():
+% binopLit16(instr="andl %ecx, %eax")
+
+%def op_and_int_lit8():
+% binopLit8(instr="andl %ecx, %eax")
+
+%def op_and_long():
+% binopWide(instr1="andl VREG_ADDRESS(%ecx), rIBASE", instr2="andl VREG_HIGH_ADDRESS(%ecx), %eax")
+
+%def op_and_long_2addr():
+% binopWide2addr(instr1="andl %eax, (rFP,rINST,4)", instr2="andl %ecx, 4(rFP,rINST,4)")
+
+%def op_cmp_long():
+/*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ */
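+/*
+ * Illustrative sketch of the required result (not generated code):
+ *
+ *   int cmp_long(int64_t a, int64_t b) {
+ *       if (a > b) return 1;
+ *       if (a < b) return -1;
+ *       return 0;
+ *   }
+ *
+ * The handler below compares the high words as signed values first and the
+ * low words as unsigned values, which gives the same ordering without ever
+ * holding a 64-bit value in one register.
+ */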
+ /* cmp-long vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
+ cmpl VREG_HIGH_ADDRESS(%ecx), %eax
+ jl .L${opcode}_smaller
+ jg .L${opcode}_bigger
+ movzbl 2(rPC), %eax # eax <- BB, restore BB
+ GET_VREG %eax, %eax # eax <- v[BB]
+ sub VREG_ADDRESS(%ecx), %eax
+ ja .L${opcode}_bigger
+ jb .L${opcode}_smaller
+.L${opcode}_finish:
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+.L${opcode}_bigger:
+ movl $$1, %eax
+ jmp .L${opcode}_finish
+
+.L${opcode}_smaller:
+ movl $$-1, %eax
+ jmp .L${opcode}_finish
+
+%def op_div_int():
+% bindiv(result="%eax", special="$0x80000000", rem="0")
+
+%def op_div_int_2addr():
+% bindiv2addr(result="%eax", special="$0x80000000")
+
+%def op_div_int_lit16():
+% bindivLit16(result="%eax", special="$0x80000000")
+
+%def op_div_int_lit8():
+% bindivLit8(result="%eax", special="$0x80000000")
+
+%def op_div_long(routine="art_quick_ldiv"):
+/* art_quick_* methods use the quick ABI,
+ * so the args go in eax, ecx, edx, ebx
+ */
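+/*
+ * As arranged by the loads below: the dividend goes in eax (low) / ecx (high),
+ * the divisor in edx (low) / ebx (high), and the 64-bit result comes back in
+ * edx:eax (stored via rIBASE/eax).
+ */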
+ /* div vAA, vBB, vCC */
+ .extern $routine
+ mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
+ mov rINST, LOCAL1(%esp) # save rINST/%ebx
+ movzbl 3(rPC), %eax # eax <- CC
+ GET_VREG %ecx, %eax
+ GET_VREG_HIGH %ebx, %eax
+ movl %ecx, %edx
+ orl %ebx, %ecx
+ jz common_errDivideByZero
+ movzbl 2(rPC), %eax # eax <- BB
+ GET_VREG_HIGH %ecx, %eax
+ GET_VREG %eax, %eax
+ call SYMBOL($routine)
+ mov LOCAL1(%esp), rINST # restore rINST/%ebx
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
+ mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_div_long_2addr(routine="art_quick_ldiv"):
+/* art_quick_* methods use the quick ABI,
+ * so the args go in eax, ecx, edx, ebx
+ */
+ /* div/2addr vA, vB */
+ .extern $routine
+ mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
+ movzbl rINSTbl, %eax
+ shrl $$4, %eax # eax <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ mov rINST, LOCAL1(%esp) # save rINST/%ebx
+    movl    %ebx, %ecx                      # ecx <- A (saved rINST)
+ GET_VREG %edx, %eax
+ GET_VREG_HIGH %ebx, %eax
+ movl %edx, %eax
+ orl %ebx, %eax
+ jz common_errDivideByZero
+ GET_VREG %eax, %ecx
+ GET_VREG_HIGH %ecx, %ecx
+ call SYMBOL($routine)
+ mov LOCAL1(%esp), rINST # restore rINST/%ebx
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
+ mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_int_to_byte():
+% unop(instr="movsbl %al, %eax")
+
+%def op_int_to_char():
+% unop(instr="movzwl %ax,%eax")
+
+%def op_int_to_long():
+ /* int to long vA, vB */
+ movzbl rINSTbl, %eax # eax <- +A
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %eax # eax <- vB
+ andb $$0xf, rINSTbl # rINST <- A
+ movl rIBASE, %ecx # cltd trashes rIBASE/edx
+    cltd                                    # rIBASE:eax <- ssssssssBBBBBBBB
+ SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
+ SET_VREG %eax, rINST # v[A+0] <- %eax
+ movl %ecx, rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_int_to_short():
+% unop(instr="movswl %ax, %eax")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
+%def op_mul_int():
+ /*
+ * 32-bit binary multiplication.
+ */
+ /* mul vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB
+ mov rIBASE, LOCAL0(%esp)
+ imull VREG_ADDRESS(%ecx), %eax # trashes rIBASE/edx
+ mov LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_int_2addr():
+ /* mul vA, vB */
+ movzx rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ GET_VREG %eax, rINST # eax <- vB
+ andb $$0xf, %cl # ecx <- A
+ movl rIBASE, rINST
+ imull VREG_ADDRESS(%ecx), %eax # trashes rIBASE/edx
+ movl rINST, rIBASE
+ SET_VREG %eax, %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_mul_int_lit16():
+ /* mul/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movzbl rINSTbl, %eax # eax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %eax # eax <- vB
+ movl rIBASE, %ecx
+ movswl 2(rPC), rIBASE # rIBASE <- ssssCCCC
+ andb $$0xf, rINSTbl # rINST <- A
+ imull rIBASE, %eax # trashes rIBASE/edx
+ movl %ecx, rIBASE
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_int_lit8():
+ /* mul/lit8 vAA, vBB, #+CC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movl rIBASE, %ecx
+ GET_VREG %eax, %eax # eax <- rBB
+ movsbl 3(rPC), rIBASE # rIBASE <- ssssssCC
+ imull rIBASE, %eax # trashes rIBASE/edx
+ movl %ecx, rIBASE
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_long():
+/*
+ * Signed 64-bit integer multiply.
+ *
+ * We could definitely use more free registers for
+ * this code. We spill rINSTw (ebx),
+ * giving us eax, ebx, ecx and edx as computational
+ * temps. On top of that, we'll spill edi (rFP)
+ * for use as the vB pointer and esi (rPC) for use
+ * as the vC pointer. Yuck.
+ *
+ */
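+/*
+ * Sketch of the computation (illustrative only; the low 64 bits of a signed
+ * and an unsigned 64x64 multiply are identical, so unsigned math suffices):
+ *
+ *   uint64_t mul64(uint64_t b, uint64_t c) {
+ *       uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
+ *       uint32_t clo = (uint32_t)c, chi = (uint32_t)(c >> 32);
+ *       uint64_t lo  = (uint64_t)blo * clo;              // full 32x32 product
+ *       uint32_t hi  = bhi * clo + chi * blo + (uint32_t)(lo >> 32);
+ *       return ((uint64_t)hi << 32) | (uint32_t)lo;
+ *   }
+ */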
+ /* mul-long vAA, vBB, vCC */
+    movzbl  2(rPC), %eax                    # eax <- BB
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+ mov rPC, LOCAL0(%esp) # save Interpreter PC
+ mov rFP, LOCAL1(%esp) # save FP
+ mov rIBASE, LOCAL2(%esp) # save rIBASE
+ leal (rFP,%eax,4), %esi # esi <- &v[B]
+ leal VREG_ADDRESS(%ecx), rFP # rFP <- &v[C]
+ movl 4(%esi), %ecx # ecx <- Bmsw
+ imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
+ movl 4(rFP), %eax # eax <- Cmsw
+ imull (%esi), %eax # eax <- (Cmsw*Blsw)
+ addl %eax, %ecx # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
+ movl (rFP), %eax # eax <- Clsw
+    mull    (%esi)                          # rIBASE:eax <- (Clsw*Blsw)
+    mov     LOCAL0(%esp), rPC               # restore Interpreter PC
+    mov     LOCAL1(%esp), rFP               # restore FP
+    leal    (%ecx,rIBASE), rIBASE           # full result now in rIBASE:%eax
+    SET_VREG_HIGH rIBASE, rINST             # v[AA+1] <- rIBASE
+    mov     LOCAL2(%esp), rIBASE            # restore IBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_mul_long_2addr():
+/*
+ * Signed 64-bit integer multiply, 2-addr version
+ *
+ * We could definitely use more free registers for
+ * this code. We must spill %edx (rIBASE) because it
+ * is used by imul. We'll also spill rINST (ebx),
+ * giving us eax, ebx, ecx and rIBASE as computational
+ * temps. On top of that, we'll spill rPC (esi)
+ * for use as the vA pointer and rFP (edi) for use
+ * as the vB pointer. Yuck.
+ */
+ /* mul-long/2addr vA, vB */
+ movzbl rINSTbl, %eax # eax <- BA
+ andb $$0xf, %al # eax <- A
+ CLEAR_WIDE_REF %eax # clear refs in advance
+ sarl $$4, rINST # rINST <- B
+ mov rPC, LOCAL0(%esp) # save Interpreter PC
+ mov rFP, LOCAL1(%esp) # save FP
+ mov rIBASE, LOCAL2(%esp) # save rIBASE
+ leal (rFP,%eax,4), %esi # esi <- &v[A]
+ leal (rFP,rINST,4), rFP # rFP <- &v[B]
+ movl 4(%esi), %ecx # ecx <- Amsw
+ imull (rFP), %ecx # ecx <- (Amsw*Blsw)
+ movl 4(rFP), %eax # eax <- Bmsw
+ imull (%esi), %eax # eax <- (Bmsw*Alsw)
+ addl %eax, %ecx # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
+ movl (rFP), %eax # eax <- Blsw
+ mull (%esi) # eax <- (Blsw*Alsw)
+ leal (%ecx,rIBASE), rIBASE # full result now in %edx:%eax
+ movl rIBASE, 4(%esi) # v[A+1] <- rIBASE
+ movl %eax, (%esi) # v[A] <- %eax
+ mov LOCAL0(%esp), rPC # restore Interpreter PC
+ mov LOCAL2(%esp), rIBASE # restore IBASE
+ mov LOCAL1(%esp), rFP # restore FP
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_neg_int():
+% unop(instr="negl %eax")
+
+%def op_neg_long():
+ /* unop vA, vB */
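+    /*
+     * Note (added): negl sets CF when the low word was non-zero, so
+     * "negl low; adcl 0 to high; negl high" folds the borrow from the low
+     * word into the high word and yields the full 64-bit two's complement
+     * negation.
+     */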
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
+ negl %eax
+ adcl $$0, %ecx
+ negl %ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_not_int():
+% unop(instr="notl %eax")
+
+%def op_not_long():
+ /* unop vA, vB */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
+ notl %eax
+ notl %ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_or_int():
+% binop(instr="orl VREG_ADDRESS(%ecx), %eax")
+
+%def op_or_int_2addr():
+% binop2addr(instr="orl %eax, VREG_ADDRESS(%ecx)")
+
+%def op_or_int_lit16():
+% binopLit16(instr="orl %ecx, %eax")
+
+%def op_or_int_lit8():
+% binopLit8(instr="orl %ecx, %eax")
+
+%def op_or_long():
+% binopWide(instr1="orl VREG_ADDRESS(%ecx), rIBASE", instr2="orl VREG_HIGH_ADDRESS(%ecx), %eax")
+
+%def op_or_long_2addr():
+% binopWide2addr(instr1="orl %eax, (rFP,rINST,4)", instr2="orl %ecx, 4(rFP,rINST,4)")
+
+%def op_rem_int():
+% bindiv(result="rIBASE", special="$0", rem="1")
+
+%def op_rem_int_2addr():
+% bindiv2addr(result="rIBASE", special="$0")
+
+%def op_rem_int_lit16():
+% bindivLit16(result="rIBASE", special="$0")
+
+%def op_rem_int_lit8():
+% bindivLit8(result="rIBASE", special="$0")
+
+%def op_rem_long():
+% op_div_long(routine="art_quick_lmod")
+
+%def op_rem_long_2addr():
+% op_div_long_2addr(routine="art_quick_lmod")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+% binopLit16(instr="subl %eax, %ecx", result="%ecx")
+
+%def op_rsub_int_lit8():
+% binopLit8(instr="subl %eax, %ecx", result="%ecx")
+
+%def op_shl_int():
+% binop1(instr="sall %cl, %eax")
+
+%def op_shl_int_2addr():
+% shop2addr(instr="sall %cl, %eax")
+
+%def op_shl_int_lit8():
+% binopLit8(instr="sall %cl, %eax")
+
+%def op_shl_long():
+/*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
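+/*
+ * Illustrative sketch of that handling (not generated code):
+ *
+ *   uint64_t shl64(uint64_t v, uint32_t cnt) {
+ *       cnt &= 63;                                   // Dalvik masks 6 bits
+ *       uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);
+ *       if (cnt & 32) {                              // x86 masked only 5 bits,
+ *           hi = lo << (cnt & 31);                   // so fix up counts >= 32
+ *           lo = 0;
+ *       } else if (cnt != 0) {
+ *           hi = (hi << cnt) | (lo >> (32 - cnt));   // what shldl does
+ *           lo <<= cnt;
+ *       }
+ *       return ((uint64_t)hi << 32) | lo;
+ *   }
+ */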
+ /* shl-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill rINST */
+ /* rINSTw gets AA */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp)
+    GET_VREG_HIGH rIBASE, %eax              # rIBASE <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
+ shldl %eax,rIBASE
+ sall %cl, %eax
+ testb $$32, %cl
+ je 2f
+ movl %eax, rIBASE
+ xorl %eax, %eax
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST # v[AA+0] <- %eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_shl_long_2addr():
+/*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ /* ecx gets shift count */
+ /* Need to spill rIBASE */
+ /* rINSTw gets AA */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ sarl $$4, %ecx # ecx <- B
+ movl rIBASE, LOCAL0(%esp)
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
+ shldl %eax, rIBASE
+ sall %cl, %eax
+ testb $$32, %cl
+ je 2f
+ movl %eax, rIBASE
+ xorl %eax, %eax
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_shr_int():
+% binop1(instr="sarl %cl, %eax")
+
+%def op_shr_int_2addr():
+% shop2addr(instr="sarl %cl, %eax")
+
+%def op_shr_int_lit8():
+% binopLit8(instr="sarl %cl, %eax")
+
+%def op_shr_long():
+/*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+ /* shr-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill rIBASE */
+ /* rINSTw gets AA */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp)
+ GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
+ shrdl rIBASE, %eax
+ sarl %cl, rIBASE
+ testb $$32, %cl
+ je 2f
+ movl rIBASE, %eax
+ sarl $$31, rIBASE
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_shr_long_2addr():
+/*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* shr-long/2addr vA, vB */
+ /* ecx gets shift count */
+ /* Need to spill rIBASE */
+ /* rINSTw gets AA */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ sarl $$4, %ecx # ecx <- B
+ movl rIBASE, LOCAL0(%esp)
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
+ shrdl rIBASE, %eax
+ sarl %cl, rIBASE
+ testb $$32, %cl
+ je 2f
+ movl rIBASE, %eax
+ sarl $$31, rIBASE
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_int():
+% binop(instr="subl VREG_ADDRESS(%ecx), %eax")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="subl %eax, VREG_ADDRESS(%ecx)")
+
+%def op_sub_long():
+% binopWide(instr1="subl VREG_ADDRESS(%ecx), rIBASE", instr2="sbbl VREG_HIGH_ADDRESS(%ecx), %eax")
+
+%def op_sub_long_2addr():
+% binopWide2addr(instr1="subl %eax, (rFP,rINST,4)", instr2="sbbl %ecx, 4(rFP,rINST,4)")
+
+%def op_ushr_int():
+% binop1(instr="shrl %cl, %eax")
+
+%def op_ushr_int_2addr():
+% shop2addr(instr="shrl %cl, %eax")
+
+%def op_ushr_int_lit8():
+% binopLit8(instr="shrl %cl, %eax")
+
+%def op_ushr_long():
+/*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance. x86 shifts automatically mask off
+ * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
+ * case specially.
+ */
+    /* ushr-long vAA, vBB, vCC */
+ /* ecx gets shift count */
+ /* Need to spill rIBASE */
+ /* rINSTw gets AA */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp)
+ GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
+ shrdl rIBASE, %eax
+ shrl %cl, rIBASE
+ testb $$32, %cl
+ je 2f
+ movl rIBASE, %eax
+ xorl rIBASE, rIBASE
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+    SET_VREG %eax, rINST                    # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_ushr_long_2addr():
+/*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+    /* ushr-long/2addr vA, vB */
+ /* ecx gets shift count */
+ /* Need to spill rIBASE */
+ /* rINSTw gets AA */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ sarl $$4, %ecx # ecx <- B
+ movl rIBASE, LOCAL0(%esp)
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
+ shrdl rIBASE, %eax
+ shrl %cl, rIBASE
+ testb $$32, %cl
+ je 2f
+ movl rIBASE, %eax
+ xorl rIBASE, rIBASE
+2:
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
+ movl LOCAL0(%esp), rIBASE
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_xor_int():
+% binop(instr="xorl VREG_ADDRESS(%ecx), %eax")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="xorl %eax, VREG_ADDRESS(%ecx)")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="xorl %ecx, %eax")
+
+%def op_xor_int_lit8():
+% binopLit8(instr="xorl %ecx, %eax")
+
+%def op_xor_long():
+% binopWide(instr1="xorl VREG_ADDRESS(%ecx), rIBASE", instr2="xorl VREG_HIGH_ADDRESS(%ecx), %eax")
+
+%def op_xor_long_2addr():
+% binopWide2addr(instr1="xorl %eax, (rFP,rINST,4)", instr2="xorl %ecx, 4(rFP,rINST,4)")
diff --git a/runtime/interpreter/mterp/x86/array.S b/runtime/interpreter/mterp/x86/array.S
new file mode 100644
index 0000000..de846a4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/array.S
@@ -0,0 +1,215 @@
+%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short
+ *
+ */
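+/*
+ * Rough C sketch of the checks below (illustrative only; the struct and
+ * helper names are made up, not runtime types):
+ *
+ *   struct Arr { int32_t length; int32_t data[1]; };
+ *   int32_t aget(struct Arr *a, uint32_t idx) {
+ *       if (a == NULL) return throw_null_pointer();
+ *       if (idx >= (uint32_t)a->length)    // single unsigned compare also
+ *           return throw_array_index();    // rejects negative indices
+ *       return a->data[idx];
+ *   }
+ */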
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ $load $data_offset(%eax,%ecx,$shift), %eax
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_boolean():
+% op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+/*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+    GET_VREG %ecx, %ecx                     # ecx <- vCC (requested index)
+ EXPORT_PC
+ movl %eax, OUT_ARG0(%esp)
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
+ movl rSELF, %ecx
+ RESTORE_IBASE_FROM_SELF %ecx
+ cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+ jnz MterpException
+ SET_VREG_OBJECT %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_short():
+% op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+/*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ */
+ /* aget-wide vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
+ movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
+ SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ leal $data_offset(%eax,%ecx,$shift), %eax
+ GET_VREG rINST, rINST
+ $store $reg, (%eax)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_boolean():
+% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+/*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ movl rPC, OUT_ARG1(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG2(%esp)
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, pc, inst_data)
+ RESTORE_IBASE
+ testb %al, %al
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_short():
+% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+/*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ */
+ /* aput-wide vAA, vBB, vCC */
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
+ movq %xmm0, (%eax) # vBB[vCC] <- xmm0
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_array_length():
+/*
+ * Return the length of an array.
+ */
+ mov rINST, %eax # eax <- BA
+ sarl $$4, rINST # rINST <- B
+ GET_VREG %ecx, rINST # ecx <- vB (object ref)
+ testl %ecx, %ecx # is null?
+ je common_errNullObject
+ andb $$0xf, %al # eax <- A
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
+ SET_VREG rINST, %eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), %ecx # ecx <- BBBBbbbb
+ leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
+ GET_VREG %eax, rINST # eax <- vAA (array object)
+ movl %eax, OUT_ARG0(%esp)
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
+ REFRESH_IBASE
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ movl rPC, OUT_ARG1(%esp)
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG2(%esp)
+ call SYMBOL($helper)
+ REFRESH_IBASE
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ movl rPC, OUT_ARG1(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG2(%esp)
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG3(%esp)
+ call SYMBOL(MterpNewArray)
+ RESTORE_IBASE
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bincmp.S b/runtime/interpreter/mterp/x86/bincmp.S
deleted file mode 100644
index ee32278..0000000
--- a/runtime/interpreter/mterp/x86/bincmp.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movzx rINSTbl, %ecx # ecx <- A+
- andb $$0xf, %cl # ecx <- A
- GET_VREG %eax, %ecx # eax <- vA
- sarl $$4, rINST # rINST <- B
- cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
- j${revcmp} 1f
- movswl 2(rPC), rINST # Get signed branch offset
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv.S b/runtime/interpreter/mterp/x86/bindiv.S
deleted file mode 100644
index e87ba45..0000000
--- a/runtime/interpreter/mterp/x86/bindiv.S
+++ /dev/null
@@ -1,48 +0,0 @@
-%default {"result":"","special":"","rem":""}
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # ecx <- vCC
- mov rIBASE, LOCAL0(%esp)
- testl %ecx, %ecx
- je common_errDivideByZero
- movl %eax, %edx
- orl %ecx, %edx
- testl $$0xFFFFFF00, %edx # If both arguments are less
- # than 8-bit and +ve
- jz .L${opcode}_8 # Do 8-bit divide
- testl $$0xFFFF0000, %edx # If both arguments are less
- # than 16-bit and +ve
- jz .L${opcode}_16 # Do 16-bit divide
- cmpl $$-1, %ecx
- jne .L${opcode}_32
- cmpl $$0x80000000, %eax
- jne .L${opcode}_32
- movl $special, $result
- jmp .L${opcode}_finish
-.L${opcode}_32:
- cltd
- idivl %ecx
- jmp .L${opcode}_finish
-.L${opcode}_8:
- div %cl # 8-bit divide otherwise.
- # Remainder in %ah, quotient in %al
- .if $rem
- movl %eax, %edx
- shr $$8, %edx
- .else
- andl $$0x000000FF, %eax
- .endif
- jmp .L${opcode}_finish
-.L${opcode}_16:
- xorl %edx, %edx # Clear %edx before divide
- div %cx
-.L${opcode}_finish:
- SET_VREG $result, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv2addr.S b/runtime/interpreter/mterp/x86/bindiv2addr.S
deleted file mode 100644
index e620996..0000000
--- a/runtime/interpreter/mterp/x86/bindiv2addr.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- mov rIBASE, LOCAL0(%esp)
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $$-1, %ecx
- jne .L${opcode}_continue_div2addr
- cmpl $$0x80000000, %eax
- jne .L${opcode}_continue_div2addr
- movl $special, $result
- SET_VREG $result, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.L${opcode}_continue_div2addr:
- cltd
- idivl %ecx
- SET_VREG $result, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/bindivLit16.S b/runtime/interpreter/mterp/x86/bindivLit16.S
deleted file mode 100644
index be094ae..0000000
--- a/runtime/interpreter/mterp/x86/bindivLit16.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit binary div/rem operation. Handles special case of op0=minint and
- * op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $$0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $$-1, %ecx
- jne .L${opcode}_continue_div
- cmpl $$0x80000000, %eax
- jne .L${opcode}_continue_div
- movl $special, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG $result, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindivLit8.S b/runtime/interpreter/mterp/x86/bindivLit8.S
deleted file mode 100644
index fddb545..0000000
--- a/runtime/interpreter/mterp/x86/bindivLit8.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default {"result":"","special":""}
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $$0x80000000, %eax
- jne .L${opcode}_continue_div
- cmpl $$-1, %ecx
- jne .L${opcode}_continue_div
- movl $special, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_continue_div:
- mov rIBASE, LOCAL0(%esp)
- cltd
- idivl %ecx
- SET_VREG $result, rINST
- mov LOCAL0(%esp), rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop.S b/runtime/interpreter/mterp/x86/binop.S
deleted file mode 100644
index d895235..0000000
--- a/runtime/interpreter/mterp/x86/binop.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- $instr # ex: addl (rFP,%ecx,4),%eax
- SET_VREG $result, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop1.S b/runtime/interpreter/mterp/x86/binop1.S
deleted file mode 100644
index 5049bb3..0000000
--- a/runtime/interpreter/mterp/x86/binop1.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"result":"%eax","tmp":"%ecx"}
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- GET_VREG %ecx, %ecx # eax <- vBB
- $instr # ex: addl %ecx,%eax
- SET_VREG $result, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop2addr.S b/runtime/interpreter/mterp/x86/binop2addr.S
deleted file mode 100644
index f126234..0000000
--- a/runtime/interpreter/mterp/x86/binop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $$0xf, %cl # ecx <- A
- $instr # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/binopLit16.S b/runtime/interpreter/mterp/x86/binopLit16.S
deleted file mode 100644
index 2fd59de..0000000
--- a/runtime/interpreter/mterp/x86/binopLit16.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $$0xf, rINSTbl # rINST <- A
- $instr # for example: addl %ecx, %eax
- SET_VREG $result, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopLit8.S b/runtime/interpreter/mterp/x86/binopLit8.S
deleted file mode 100644
index 67cead2..0000000
--- a/runtime/interpreter/mterp/x86/binopLit8.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %eax # eax <- rBB
- $instr # ex: addl %ecx,%eax
- SET_VREG $result, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide.S b/runtime/interpreter/mterp/x86/binopWide.S
deleted file mode 100644
index da1293d..0000000
--- a/runtime/interpreter/mterp/x86/binopWide.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- $instr1 # ex: addl (rFP,%ecx,4),rIBASE
- $instr2 # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp), rIBASE # restore rIBASE
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide2addr.S b/runtime/interpreter/mterp/x86/binopWide2addr.S
deleted file mode 100644
index da816f4..0000000
--- a/runtime/interpreter/mterp/x86/binopWide2addr.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $$4, %ecx # ecx<- B
- GET_VREG %eax, %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
- andb $$0xF, rINSTbl # rINST<- A
- $instr1 # ex: addl %eax,(rFP,rINST,4)
- $instr2 # ex: adcl %ecx,4(rFP,rINST,4)
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/const.S b/runtime/interpreter/mterp/x86/const.S
deleted file mode 100644
index f0cac1a..0000000
--- a/runtime/interpreter/mterp/x86/const.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/control_flow.S b/runtime/interpreter/mterp/x86/control_flow.S
new file mode 100644
index 0000000..74b4fad
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/control_flow.S
@@ -0,0 +1,219 @@
+%def bincmp(revcmp=""):
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
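+/*
+ * Worked example (added): op_if_lt below passes revcmp="ge", so the jge
+ * generated here skips straight to the not-taken path whenever vA >= vB,
+ * i.e. exactly when if-lt should fall through.
+ */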
+ /* if-cmp vA, vB, +CCCC */
+ movzx rINSTbl, %ecx # ecx <- A+
+ andb $$0xf, %cl # ecx <- A
+ GET_VREG %eax, %ecx # eax <- vA
+ sarl $$4, rINST # rINST <- B
+ cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
+ j${revcmp} 1f
+ movswl 2(rPC), rINST # Get signed branch offset
+ testl rINST, rINST
+ jmp MterpCommonTakenBranch
+1:
+ cmpw $$JIT_CHECK_OSR, rPROFILE
+ je .L_check_not_taken_osr
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def zcmp(revcmp=""):
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $$0, VREG_ADDRESS(rINST) # compare (vA, 0)
+ j${revcmp} 1f
+ movswl 2(rPC), rINST # fetch signed displacement
+ testl rINST, rINST
+ jmp MterpCommonTakenBranch
+1:
+ cmpw $$JIT_CHECK_OSR, rPROFILE
+ je .L_check_not_taken_osr
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_goto():
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ movsbl rINSTbl, rINST # rINST <- ssssssAA
+ testl rINST, rINST
+ jmp MterpCommonTakenBranch
+
+%def op_goto_16():
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ movswl 2(rPC), rINST # rINST <- ssssAAAA
+ testl rINST, rINST
+ jmp MterpCommonTakenBranch
+
+%def op_goto_32():
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ movl 2(rPC), rINST # rINST <- AAAAAAAA
+ testl rINST, rINST
+ jmp MterpCommonTakenBranch
+
+%def op_if_eq():
+% bincmp(revcmp="ne")
+
+%def op_if_eqz():
+% zcmp(revcmp="ne")
+
+%def op_if_ge():
+% bincmp(revcmp="l")
+
+%def op_if_gez():
+% zcmp(revcmp="l")
+
+%def op_if_gt():
+% bincmp(revcmp="le")
+
+%def op_if_gtz():
+% zcmp(revcmp="le")
+
+%def op_if_le():
+% bincmp(revcmp="g")
+
+%def op_if_lez():
+% zcmp(revcmp="g")
+
+%def op_if_lt():
+% bincmp(revcmp="ge")
+
+%def op_if_ltz():
+% zcmp(revcmp="ge")
+
+%def op_if_ne():
+% bincmp(revcmp="e")
+
+%def op_if_nez():
+% zcmp(revcmp="e")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+/*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ movl 2(rPC), %ecx # ecx <- BBBBbbbb
+ GET_VREG %eax, rINST # eax <- vAA
+ leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
+ movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
+ movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
+ call SYMBOL($func)
+ REFRESH_IBASE
+ testl %eax, %eax
+ movl %eax, rINST
+ jmp MterpCommonTakenBranch
+
+%def op_return():
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movl rSELF, %eax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ jz 1f
+ movl %eax, OUT_ARG0(%esp)
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINST # eax <- vAA
+ xorl %ecx, %ecx
+ jmp MterpReturn
+
+%def op_return_object():
+% op_return()
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movl rSELF, %eax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ jz 1f
+ movl %eax, OUT_ARG0(%esp)
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorl %eax, %eax
+ xorl %ecx, %ecx
+ jmp MterpReturn
+
+%def op_return_void_no_barrier():
+ movl rSELF, %eax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ jz 1f
+ movl %eax, OUT_ARG0(%esp)
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorl %eax, %eax
+ xorl %ecx, %ecx
+ jmp MterpReturn
+
+%def op_return_wide():
+/*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movl rSELF, %eax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ jz 1f
+ movl %eax, OUT_ARG0(%esp)
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
+ jmp MterpReturn
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+/*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ GET_VREG %eax, rINST # eax<- vAA (exception object)
+ testl %eax, %eax
+ jz common_errNullObject
+ movl rSELF,%ecx
+ movl %eax, THREAD_EXCEPTION_OFFSET(%ecx)
+ jmp MterpException
diff --git a/runtime/interpreter/mterp/x86/cvtfp_int.S b/runtime/interpreter/mterp/x86/cvtfp_int.S
deleted file mode 100644
index a8bad63..0000000
--- a/runtime/interpreter/mterp/x86/cvtfp_int.S
+++ /dev/null
@@ -1,61 +0,0 @@
-%default {"srcdouble":"1","tgtlong":"1"}
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate. This model
- * differs from what is delivered normally via the x86 fpu, so we have
- * to play some games.
- */
- /* float/double to int/long vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- .if $srcdouble
- fldl VREG_ADDRESS(rINST) # %st0 <- vB
- .else
- flds VREG_ADDRESS(rINST) # %st0 <- vB
- .endif
- ftst
- fnstcw LOCAL0(%esp) # remember original rounding mode
- movzwl LOCAL0(%esp), %eax
- movb $$0xc, %ah
- movw %ax, LOCAL0+2(%esp)
- fldcw LOCAL0+2(%esp) # set "to zero" rounding mode
- andb $$0xf, %cl # ecx <- A
- .if $tgtlong
- fistpll VREG_ADDRESS(%ecx) # convert and store
- .else
- fistpl VREG_ADDRESS(%ecx) # convert and store
- .endif
- fldcw LOCAL0(%esp) # restore previous rounding mode
- .if $tgtlong
- movl $$0x80000000, %eax
- xorl VREG_HIGH_ADDRESS(%ecx), %eax
- orl VREG_ADDRESS(%ecx), %eax
- .else
- cmpl $$0x80000000, VREG_ADDRESS(%ecx)
- .endif
- je .L${opcode}_special_case # fix up result
-
-.L${opcode}_finish:
- xor %eax, %eax
- mov %eax, VREG_REF_ADDRESS(%ecx)
- .if $tgtlong
- mov %eax, VREG_REF_HIGH_ADDRESS(%ecx)
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
-.L${opcode}_special_case:
- fnstsw %ax
- sahf
- jp .L${opcode}_isNaN
- adcl $$-1, VREG_ADDRESS(%ecx)
- .if $tgtlong
- adcl $$-1, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .L${opcode}_finish
-.L${opcode}_isNaN:
- movl $$0, VREG_ADDRESS(%ecx)
- .if $tgtlong
- movl $$0, VREG_HIGH_ADDRESS(%ecx)
- .endif
- jmp .L${opcode}_finish
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
deleted file mode 100644
index 939dc61..0000000
--- a/runtime/interpreter/mterp/x86/entry.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa esp, 4
-
- /* Spill callee save regs */
- PUSH %ebp
- PUSH %edi
- PUSH %esi
- PUSH %ebx
-
- /* Allocate frame */
- subl $$FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Load ShadowFrame pointer */
- movl IN_ARG2(%esp), %edx
-
- /* Remember the return register */
- movl IN_ARG3(%esp), %eax
- movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
-
- /* Remember the code_item */
- movl IN_ARG1(%esp), %ecx
- movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
- leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
- leal (rFP, %eax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
- lea (%ecx, %eax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Set up for backwards branches & osr profiling */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpSetUpHotnessCountdown)
-
- /* Starting ibase */
- REFRESH_IBASE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86/fallback.S b/runtime/interpreter/mterp/x86/fallback.S
deleted file mode 100644
index 8d61166..0000000
--- a/runtime/interpreter/mterp/x86/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
-
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86/floating_point.S
new file mode 100644
index 0000000..3de1fc8
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/floating_point.S
@@ -0,0 +1,236 @@
+%def fpcmp(suff="d", nanval="pos"):
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ movzbl 3(rPC), %ecx # ecx<- CC
+ movzbl 2(rPC), %eax # eax<- BB
+ movs${suff} VREG_ADDRESS(%eax), %xmm0
+ xor %eax, %eax
+ ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
+ jp .L${opcode}_nan_is_${nanval}
+ je .L${opcode}_finish
+ jb .L${opcode}_less
+.L${opcode}_nan_is_pos:
+ incl %eax
+ jmp .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+ decl %eax
+.L${opcode}_finish:
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def fpcvt(instr="", load="", store="", wide="0"):
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movzbl rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ $load VREG_ADDRESS(rINST) # %st0 <- vB
+ andb $$0xf, %cl # ecx <- A
+ $instr
+ $store VREG_ADDRESS(%ecx) # vA <- %st0
+ .if $wide
+ CLEAR_WIDE_REF %ecx
+ .else
+ CLEAR_REF %ecx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def sseBinop(instr="", suff=""):
+ movzbl 2(rPC), %ecx # ecx <- BB
+ movzbl 3(rPC), %eax # eax <- CC
+ movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ ${instr}${suff} VREG_ADDRESS(%eax), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def sseBinop2Addr(instr="", suff=""):
+ movzx rINSTbl, %ecx # ecx <- A+
+ andl $$0xf, %ecx # ecx <- A
+ movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ sarl $$4, rINST # rINST<- B
+ ${instr}${suff} VREG_ADDRESS(rINST), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_double():
+% sseBinop(instr="adds", suff="d")
+
+%def op_add_double_2addr():
+% sseBinop2Addr(instr="adds", suff="d")
+
+%def op_add_float():
+% sseBinop(instr="adds", suff="s")
+
+%def op_add_float_2addr():
+% sseBinop2Addr(instr="adds", suff="s")
+
+%def op_cmpg_double():
+% fpcmp(suff="d", nanval="pos")
+
+%def op_cmpg_float():
+% fpcmp(suff="s", nanval="pos")
+
+%def op_cmpl_double():
+% fpcmp(suff="d", nanval="neg")
+
+%def op_cmpl_float():
+% fpcmp(suff="s", nanval="neg")
+
+%def op_div_double():
+% sseBinop(instr="divs", suff="d")
+
+%def op_div_double_2addr():
+% sseBinop2Addr(instr="divs", suff="d")
+
+%def op_div_float():
+% sseBinop(instr="divs", suff="s")
+
+%def op_div_float_2addr():
+% sseBinop2Addr(instr="divs", suff="s")
+
+%def op_double_to_float():
+% fpcvt(load="fldl", store="fstps")
+
+%def op_double_to_int():
+% cvtfp_int(srcdouble="1", tgtlong="0")
+
+%def op_double_to_long():
+% cvtfp_int(srcdouble="1", tgtlong="1")
+
+%def op_float_to_double():
+% fpcvt(load="flds", store="fstpl", wide="1")
+
+%def op_float_to_int():
+% cvtfp_int(srcdouble="0", tgtlong="0")
+
+%def op_float_to_long():
+% cvtfp_int(srcdouble="0", tgtlong="1")
+
+%def op_int_to_double():
+% fpcvt(load="fildl", store="fstpl", wide="1")
+
+%def op_int_to_float():
+% fpcvt(load="fildl", store="fstps")
+
+%def op_long_to_double():
+% fpcvt(load="fildll", store="fstpl", wide="1")
+
+%def op_long_to_float():
+% fpcvt(load="fildll", store="fstps")
+
+%def op_mul_double():
+% sseBinop(instr="muls", suff="d")
+
+%def op_mul_double_2addr():
+% sseBinop2Addr(instr="muls", suff="d")
+
+%def op_mul_float():
+% sseBinop(instr="muls", suff="s")
+
+%def op_mul_float_2addr():
+% sseBinop2Addr(instr="muls", suff="s")
+
+%def op_neg_double():
+% fpcvt(instr="fchs", load="fldl", store="fstpl", wide="1")
+
+%def op_neg_float():
+% fpcvt(instr="fchs", load="flds", store="fstps")
+
+%def op_rem_double():
+ /* rem_double vAA, vBB, vCC */
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movzbl  2(rPC), %eax                    # eax <- BB
+    fldl    VREG_ADDRESS(%ecx)              # %st1 <- fp[vCC] (divisor)
+    fldl    VREG_ADDRESS(%eax)              # %st0 <- fp[vBB] (dividend)
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(rINST) # fp[vAA] <- %st
+ CLEAR_WIDE_REF rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_double_2addr():
+ /* rem_double/2addr vA, vB */
+ movzx rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ fldl VREG_ADDRESS(rINST) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ fldl VREG_ADDRESS(%ecx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(%ecx) # %st to vA
+ CLEAR_WIDE_REF %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_rem_float():
+ /* rem_float vAA, vBB, vCC */
+    movzbl  3(rPC), %ecx                    # ecx <- CC
+    movzbl  2(rPC), %eax                    # eax <- BB
+    flds    VREG_ADDRESS(%ecx)              # vCC (divisor) to fp stack
+    flds    VREG_ADDRESS(%eax)              # vBB (dividend) to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(rINST) # %st to vAA
+ CLEAR_REF rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_float_2addr():
+ /* rem_float/2addr vA, vB */
+ movzx rINSTbl, %ecx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ flds VREG_ADDRESS(rINST) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ flds VREG_ADDRESS(%ecx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(%ecx) # %st to vA
+ CLEAR_REF %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_double():
+% sseBinop(instr="subs", suff="d")
+
+%def op_sub_double_2addr():
+% sseBinop2Addr(instr="subs", suff="d")
+
+%def op_sub_float():
+% sseBinop(instr="subs", suff="s")
+
+%def op_sub_float_2addr():
+% sseBinop2Addr(instr="subs", suff="s")
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
deleted file mode 100644
index 0b08cf9..0000000
--- a/runtime/interpreter/mterp/x86/footer.S
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG0(%esp)
- movl THREAD_FLAGS_OFFSET(%eax), %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movl rSELF, %eax
- testl $$-1, THREAD_EXCEPTION_OFFSET(%eax)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
- movl OFF_FP_DEX_PC(rFP), %ecx
- lea (%eax, %ecx, 2), rPC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decw rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- REFRESH_IBASE
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp) # rSELF in eax
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leal (rPC, rINST, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl $$2, OUT_ARG2(%esp)
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- REFRESH_IBASE
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpLogOSR)
-#endif
- movl $$1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movl rSELF, %eax
- movl %eax, OUT_ARG0(%esp)
- lea OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xor %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $$1, %eax
- jmp MterpDone
-MterpReturn:
- movl OFF_FP_RESULT_REGISTER(rFP), %edx
- movl %eax, (%edx)
- movl %ecx, 4(%edx)
- mov $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- cmpw $$0, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG2(%esp)
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addl $$FRAME_SIZE, %esp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %ebx
- POP %esi
- POP %edi
- POP %ebp
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/fpcmp.S b/runtime/interpreter/mterp/x86/fpcmp.S
deleted file mode 100644
index 5f9eef9..0000000
--- a/runtime/interpreter/mterp/x86/fpcmp.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"suff":"d","nanval":"pos"}
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx<- CC
- movzbl 2(rPC), %eax # eax<- BB
- movs${suff} VREG_ADDRESS(%eax), %xmm0
- xor %eax, %eax
- ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
- jp .L${opcode}_nan_is_${nanval}
- je .L${opcode}_finish
- jb .L${opcode}_less
-.L${opcode}_nan_is_pos:
- incl %eax
- jmp .L${opcode}_finish
-.L${opcode}_nan_is_neg:
-.L${opcode}_less:
- decl %eax
-.L${opcode}_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/fpcvt.S b/runtime/interpreter/mterp/x86/fpcvt.S
deleted file mode 100644
index 7808285..0000000
--- a/runtime/interpreter/mterp/x86/fpcvt.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"instr":"","load":"","store":"","wide":"0"}
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- $load VREG_ADDRESS(rINST) # %st0 <- vB
- andb $$0xf, %cl # ecx <- A
- $instr
- $store VREG_ADDRESS(%ecx) # vA <- %st0
- .if $wide
- CLEAR_WIDE_REF %ecx
- .else
- CLEAR_REF %ecx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
deleted file mode 100644
index a79db27..0000000
--- a/runtime/interpreter/mterp/x86/header.S
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86 ABI general notes:
-
-Caller save set:
- eax, edx, ecx, st(0)-st(7)
-Callee save set:
- ebx, esi, edi, ebp
-Return regs:
- 32-bit in eax
- 64-bit in edx:eax (low-order 32 in eax)
- fp on top of fp stack st(0)
-
-Parameters passed on stack, pushed right-to-left. On entry to target, first
-parm is at 4(%esp). Traditional entry code is:
-
-functEntry:
- push %ebp # save old frame pointer
- mov %ebp,%esp # establish new frame pointer
- sub FrameSize,%esp # Allocate storage for spill, locals & outs
-
-Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPC esi interpreted program counter, used for fetching instructions
- rFP edi interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE edx base of instruction handler table
- rREFS ebp base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $$(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $$value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushl \_reg
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popl \_reg
- .cfi_adjust_cfa_offset -4
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 4 bytes for return address + 4 * 4 for spills
- */
-#define FRAME_SIZE 28
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 (FRAME_SIZE + 16 + 16)
-#define IN_ARG2 (FRAME_SIZE + 16 + 12)
-#define IN_ARG1 (FRAME_SIZE + 16 + 8)
-#define IN_ARG0 (FRAME_SIZE + 16 + 4)
-/* Spill offsets relative to %esp */
-#define LOCAL0 (FRAME_SIZE - 4)
-#define LOCAL1 (FRAME_SIZE - 8)
-#define LOCAL2 (FRAME_SIZE - 12)
-/* Out Arg offsets, relative to %esp */
-#define OUT_ARG3 ( 12)
-#define OUT_ARG2 ( 8)
-#define OUT_ARG1 ( 4)
-#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF IN_ARG0(%esp)
-#define rPC %esi
-#define CFI_DEX 6 // DWARF register number of the register holding dex-pc (esi).
-#define CFI_TMP 0 // DWARF register number of the first argument register (eax).
-#define rFP %edi
-#define rINST %ebx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %edx
-#define rREFS %ebp
-#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movl rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
- */
-.macro RESTORE_IBASE
- movl rSELF, rIBASE
- movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
-.endm
-
-/*
- * If rSELF is already loaded then we can use it from known reg.
- */
-.macro RESTORE_IBASE_FROM_SELF _reg
- movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb MACRO_LITERAL(\_opnum), rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwl (rPC), rINST
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(${handler_size_bits}), %eax
- addl rIBASE, %eax
- jmp *%eax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leal 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value to xmm. */
-.macro GET_WIDE_FP_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value from xmm. xmm is clobbered. */
-.macro SET_WIDE_FP_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- pxor \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
diff --git a/runtime/interpreter/mterp/x86/instruction_end.S b/runtime/interpreter/mterp/x86/instruction_end.S
deleted file mode 100644
index 94587f8..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_alt.S b/runtime/interpreter/mterp/x86/instruction_end_alt.S
deleted file mode 100644
index 7757bce..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_end_sister.S b/runtime/interpreter/mterp/x86/instruction_end_sister.S
deleted file mode 100644
index 8eb79ac..0000000
--- a/runtime/interpreter/mterp/x86/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmSisterEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
- .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86/instruction_start.S b/runtime/interpreter/mterp/x86/instruction_start.S
deleted file mode 100644
index 5d29a819..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/x86/instruction_start_alt.S b/runtime/interpreter/mterp/x86/instruction_start_alt.S
deleted file mode 100644
index 8dcf5bf..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86/instruction_start_sister.S b/runtime/interpreter/mterp/x86/instruction_start_sister.S
deleted file mode 100644
index 796e98b..0000000
--- a/runtime/interpreter/mterp/x86/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmSisterStart)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
- .global SYMBOL(artMterpAsmSisterStart)
- .text
- .balign 4
-SYMBOL(artMterpAsmSisterStart):
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index c23053b..587c4cf 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -23,3 +23,99 @@
RESTORE_IBASE
FETCH_INST
GOTO_NEXT
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG3(%esp)
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ RESTORE_IBASE
+ FETCH_INST
+ GOTO_NEXT
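+    /*
+     * Note: the ADVANCE_PC 4 above reflects the 4-code-unit 45cc/4rcc formats used
+     * by invoke-polymorphic (the extra code unit carries proto@HHHH); the plain
+     * invoke formats handled by invoke() are only 3 code units wide.
+     */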
+
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/x86/invoke_polymorphic.S b/runtime/interpreter/mterp/x86/invoke_polymorphic.S
deleted file mode 100644
index 5690b22..0000000
--- a/runtime/interpreter/mterp/x86/invoke_polymorphic.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- movl rSELF, %ecx
- movl %ecx, OUT_ARG0(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG1(%esp)
- movl rPC, OUT_ARG2(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG3(%esp)
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- RESTORE_IBASE
- FETCH_INST
- GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
new file mode 100644
index 0000000..04b653e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -0,0 +1,789 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+x86 ABI general notes:
+
+Caller save set:
+ eax, edx, ecx, st(0)-st(7)
+Callee save set:
+ ebx, esi, edi, ebp
+Return regs:
+ 32-bit in eax
+ 64-bit in edx:eax (low-order 32 in eax)
+ fp on top of fp stack st(0)
+
+Parameters passed on stack, pushed right-to-left. On entry to target, first
+parm is at 4(%esp). Traditional entry code is:
+
+functEntry:
+ push %ebp # save old frame pointer
+ mov %ebp,%esp # establish new frame pointer
+ sub FrameSize,%esp # Allocate storage for spill, locals & outs
+
+Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp)
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be esp relative.
+*/
+
+/*
+Mterp and x86 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+ nick reg purpose
+ rPC esi interpreted program counter, used for fetching instructions
+ rFP edi interpreted frame pointer, used for accessing locals and args
+ rINSTw bx first 16-bit code of current instruction
+ rINSTbl bl opcode portion of instruction word
+ rINSTbh bh high byte of inst word, usually contains src/tgt reg names
+ rIBASE edx base of instruction handler table
+ rREFS ebp base of object references in shadow frame.
+
+Notes:
+ o High order 16 bits of ebx must be zero on entry to handler
+ o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+ o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+/*
+ * Handle Mac compiler specifics
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $$(value)
+ #define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+ #define ASM_HIDDEN .private_extern
+#else
+ #define MACRO_LITERAL(value) $$value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+ #define ASM_HIDDEN .hidden
+#endif
+
+.macro PUSH _reg
+ pushl \_reg
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popl \_reg
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore \_reg
+.endm
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
+#define OFF_FP_SHADOWFRAME OFF_FP(0)
+
+/* FRAME_SIZE is chosen so that the stack stays 16-byte aligned;
+ * remember to count the 4-byte return address + 4 * 4 bytes for spills.
+ */
+#define FRAME_SIZE 28
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3 (FRAME_SIZE + 16 + 16)
+#define IN_ARG2 (FRAME_SIZE + 16 + 12)
+#define IN_ARG1 (FRAME_SIZE + 16 + 8)
+#define IN_ARG0 (FRAME_SIZE + 16 + 4)
+/* Spill offsets relative to %esp */
+#define LOCAL0 (FRAME_SIZE - 4)
+#define LOCAL1 (FRAME_SIZE - 8)
+#define LOCAL2 (FRAME_SIZE - 12)
+/* Out Arg offsets, relative to %esp */
+#define OUT_ARG3 ( 12)
+#define OUT_ARG2 ( 8)
+#define OUT_ARG1 ( 4)
+#define OUT_ARG0 ( 0) /* <- ExecuteMterpImpl esp + 0 */
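+
+/*
+ * Worked example of the layout above (illustrative, not part of the original notes):
+ * after the four callee-save pushes (16 bytes) and the FRAME_SIZE allocation, the
+ * caller's first argument sits at FRAME_SIZE + 16 + 4 = 48(%esp), where the extra
+ * 4 bytes are the return address; this matches IN_ARG0. The total of 48 is a
+ * multiple of 16, which keeps %esp 16-byte aligned for SSE as noted above.
+ */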
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF IN_ARG0(%esp)
+#define rPC %esi
+#define CFI_DEX 6 // DWARF register number of the register holding dex-pc (esi).
+#define CFI_TMP 0 // DWARF register number of the first argument register (eax).
+#define rFP %edi
+#define rINST %ebx
+#define rINSTw %bx
+#define rINSTbh %bh
+#define rINSTbl %bl
+#define rIBASE %edx
+#define rREFS %ebp
+#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array.  For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ movl rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ */
+.macro REFRESH_IBASE
+ movl rSELF, rIBASE
+ movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
+.endm
+
+/*
+ * Refresh handler table.
+ * IBase is kept in a caller-save register, so we must restore it after each call.
+ * It is also clobbered by some 64-bit operations (like imul), so we should
+ * restore it in those cases as well.
+ *
+ * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
+ */
+.macro RESTORE_IBASE
+ movl rSELF, rIBASE
+ movl THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
+.endm
+
+/*
+ * If rSELF is already loaded then we can use it from known reg.
+ */
+.macro RESTORE_IBASE_FROM_SELF _reg
+ movl THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * At enter to handler rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+ movb rINSTbl, rINSTbh
+ movb MACRO_LITERAL(\_opnum), rINSTbl
+.endm
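+
+/*
+ * Example (hypothetical operands, for illustration): for the code unit 0x2b6e
+ * (invoke-virtual with inst_data 0x2b), a handler is entered with rINST == 0x002b;
+ * after "REFRESH_INST 0x6e", rINSTw again holds the full instruction word 0x2b6e.
+ */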
+
+/*
+ * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
+ */
+.macro FETCH_INST
+ movzwl (rPC), rINST
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+ movzx rINSTbl,%eax
+ movzbl rINSTbh,rINST
+ shll MACRO_LITERAL(${handler_size_bits}), %eax
+ addl rIBASE, %eax
+ jmp *%eax
+.endm
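+
+/*
+ * Illustration (assuming the usual 128-byte handler stride, i.e. handler_size_bits
+ * expands to 7): opcode 0x1a (const-string) dispatches to
+ * rIBASE + (0x1a << 7) = rIBASE + 0xd00.
+ */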
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+ leal 2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+ ADVANCE_PC \_count
+ FETCH_INST
+ GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+ movl VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+/* Read wide value to xmm. */
+.macro GET_WIDE_FP_VREG _reg _vreg
+ movq VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+/* Write wide value from xmm. xmm is clobbered. */
+.macro SET_WIDE_FP_VREG _reg _vreg
+ movq \_reg, VREG_ADDRESS(\_vreg)
+ pxor \_reg, \_reg
+ movq \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+ movl VREG_HIGH_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+ movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_REF _vreg
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
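+
+/*
+ * Note on the vreg macros above: vregs are double-stored, with values in the rFP
+ * array and a parallel rREFS array that is what the GC scans. SET_VREG therefore
+ * zeroes the matching rREFS slot so a stale reference is not kept live, while
+ * SET_VREG_OBJECT writes the value to both arrays so the reference stays visible
+ * to the GC.
+ */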
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+ .text
+ ASM_HIDDEN SYMBOL(\name)
+ .global SYMBOL(\name)
+ FUNCTION_TYPE(\name)
+SYMBOL(\name):
+.endm
+
+.macro END name
+ SIZE(\name,\name)
+.endm
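+
+/*
+ * ENTRY/END wrap each generated handler and helper in the .global/.type/.size
+ * directives defined above, so every one appears as a properly sized function
+ * symbol to debuggers and unwinders.
+ */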
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ *
+ * On entry:
+ * 0 Thread* self
+ * 1 insns_
+ * 2 ShadowFrame
+ * 3 JValue* result_register
+ *
+ */
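+/*
+ * Judging from the argument list above and the return paths below (the bail-out
+ * path via MterpCommonFallback leaves 0 in %eax, the normal completion paths
+ * leave 1), the C-visible signature is roughly
+ * bool ExecuteMterpImpl(Thread* self, const uint16_t* dex_instructions,
+ *                       ShadowFrame* shadow_frame, JValue* result_register);
+ * see the C++ declaration for the authoritative prototype.
+ */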
+ENTRY ExecuteMterpImpl
+ .cfi_startproc
+ .cfi_def_cfa esp, 4
+
+ /* Spill callee save regs */
+ PUSH %ebp
+ PUSH %edi
+ PUSH %esi
+ PUSH %ebx
+
+ /* Allocate frame */
+ subl $$FRAME_SIZE, %esp
+ .cfi_adjust_cfa_offset FRAME_SIZE
+
+ /* Load ShadowFrame pointer */
+ movl IN_ARG2(%esp), %edx
+
+ /* Remember the return register */
+ movl IN_ARG3(%esp), %eax
+ movl %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)
+
+ /* Remember the code_item */
+ movl IN_ARG1(%esp), %ecx
+ movl %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)
+
+ /* set up "named" registers */
+ movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
+ leal SHADOWFRAME_VREGS_OFFSET(%edx), rFP
+ leal (rFP, %eax, 4), rREFS
+ movl SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
+ lea (%ecx, %eax, 2), rPC
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+ EXPORT_PC
+
+ /* Set up for backwards branches & osr profiling */
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG2(%esp)
+ call SYMBOL(MterpSetUpHotnessCountdown)
+
+ /* Starting ibase */
+ REFRESH_IBASE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+ // cfi info continues, and covers the whole mterp implementation.
+ END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+    // Call C++ to do debug checks and return to the handler using a tail call.
+ .extern MterpCheckBefore
+    popl    %eax                            # Return address (the instruction handler).
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rPC, OUT_ARG2(%esp)
+ pushl %eax # Return address for the tail call.
+ jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
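+    /*
+     * The popl/pushl pair above exists because this helper is reached via "call":
+     * popping the return address first realigns %esp with the mterp frame so the
+     * OUT_ARG offsets land in the usual out-arg slots, and pushing it back lets
+     * MterpCheckBefore's ret jump straight back into the instruction handler.
+     */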
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ call SYMBOL(Mterp_dchecks_before_helper)
+ REFRESH_IBASE
+ #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+%def helpers():
+ ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogDivideByZeroException)
+#endif
+ jmp MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogArrayIndexException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogNoSuchMethodException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogNullObjectException)
+#endif
+ jmp MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogExceptionThrownException)
+#endif
+ jmp MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+    movl    %ecx, OUT_ARG1(%esp)
+ movl THREAD_FLAGS_OFFSET(%eax), %eax
+ movl %eax, OUT_ARG2(%esp)
+ call SYMBOL(MterpLogSuspendFallback)
+#endif
+ jmp MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ movl rSELF, %eax
+ testl $$-1, THREAD_EXCEPTION_OFFSET(%eax)
+ jz MterpFallback
+ /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpHandleException)
+ testb %al, %al
+ jz MterpExceptionReturn
+ movl OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
+ movl OFF_FP_DEX_PC(rFP), %ecx
+ lea (%eax, %ecx, 2), rPC
+ movl rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* Do we need to switch interpreters? */
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ /* resume execution at catch block */
+ REFRESH_IBASE
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * rINST <= signed offset
+ * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranch:
+ jg .L_forward_branch # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+# error "JIT_CHECK_OSR must be -1."
+#endif
+ cmpw $$JIT_CHECK_OSR, rPROFILE
+ je .L_osr_check
+ decw rPROFILE
+ je .L_add_batch # counted down to zero - report
+.L_resume_backward_branch:
+ movl rSELF, %eax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
+ leal (rPC, rINST, 2), rPC
+ FETCH_INST
+ jnz .L_suspend_request_pending
+ REFRESH_IBASE
+ GOTO_NEXT
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ movl %eax, OUT_ARG0(%esp) # rSELF in eax
+ call SYMBOL(MterpSuspendCheck) # (self)
+ testb %al, %al
+ jnz MterpFallback
+ REFRESH_IBASE # might have changed during suspend
+ GOTO_NEXT
+
+.L_no_count_backwards:
+ cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
+ jne .L_resume_backward_branch
+.L_osr_check:
+ EXPORT_PC
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rINST, OUT_ARG2(%esp)
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ jz .L_resume_backward_branch
+ jmp MterpOnStackReplacement
+
+.L_forward_branch:
+ cmpw $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
+ je .L_check_osr_forward
+.L_resume_forward_branch:
+ leal (rPC, rINST, 2), rPC
+ FETCH_INST
+ GOTO_NEXT
+
+.L_check_osr_forward:
+ EXPORT_PC
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rINST, OUT_ARG2(%esp)
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ REFRESH_IBASE
+ jz .L_resume_forward_branch
+ jmp MterpOnStackReplacement
+
+.L_add_batch:
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG2(%esp)
+ call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ jmp .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when an OSR check request is active on
+ * the not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ EXPORT_PC
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl $$2, OUT_ARG2(%esp)
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ REFRESH_IBASE
+ jnz MterpOnStackReplacement
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rINST, OUT_ARG2(%esp)
+ call SYMBOL(MterpLogOSR)
+#endif
+ movl $$1, %eax
+ jmp MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movl rSELF, %eax
+ movl %eax, OUT_ARG0(%esp)
+ lea OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ call SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ xor %eax, %eax
+ jmp MterpDone
+
+/*
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ movl $$1, %eax
+ jmp MterpDone
+MterpReturn:
+ movl OFF_FP_RESULT_REGISTER(rFP), %edx
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ mov $$1, %eax
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ cmpw $$0, rPROFILE
+ jle MRestoreFrame # if > 0, we may have some counts to report.
+
+ movl %eax, rINST # stash return value
+ /* Report cached hotness counts */
+ movl OFF_FP_METHOD(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG2(%esp)
+ call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ movl rINST, %eax # restore return value
+
+ /* pop up frame */
+MRestoreFrame:
+ addl $$FRAME_SIZE, %esp
+ .cfi_adjust_cfa_offset -FRAME_SIZE
+
+ /* Restore callee save register */
+ POP %ebx
+ POP %esi
+ POP %edi
+ POP %ebp
+ ret
+ .cfi_endproc
+ END MterpHelpers
+
+%def instruction_end():
+
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
+ .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
+%def instruction_start():
+
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
+ .global SYMBOL(artMterpAsmInstructionStart)
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+ .text
+
+%def opcode_start():
+ ENTRY Mterp_${opcode}
+%def opcode_end():
+ END Mterp_${opcode}
+%def helper_start(name):
+ ENTRY ${name}
+%def helper_end(name):
+ END ${name}
diff --git a/runtime/interpreter/mterp/x86/object.S b/runtime/interpreter/mterp/x86/object.S
new file mode 100644
index 0000000..a47fa3a
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/object.S
@@ -0,0 +1,278 @@
+%def field(helper=""):
+ /*
+ * General field read / write (iget-* iput-* sget-* sput-*).
+ */
+ .extern $helper
+ REFRESH_INST ${opnum} # fix rINST to include opcode
+ movl rPC, OUT_ARG0(%esp) # arg0: Instruction* inst
+ movl rINST, OUT_ARG1(%esp) # arg1: uint16_t inst_data
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp) # arg2: ShadowFrame* sf
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp) # arg3: Thread* self
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpPossibleException
+ RESTORE_IBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
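+    /*
+     * The helpers plugged in below (MterpIGetU32, MterpIPutObj, MterpSGetU64, and
+     * friends) are assumed to share the signature implied by the setup above,
+     * roughly bool (const Instruction*, uint16_t, ShadowFrame*, Thread*), and to
+     * return zero in %al when an exception is pending.
+     */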
+
+%def op_check_cast():
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ leal VREG_ADDRESS(rINST), %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ movl OFF_FP_METHOD(rFP),%eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG3(%esp)
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+% field(helper=helper)
+
+%def op_iget_boolean():
+% op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="movsbl")
+
+%def op_iget_byte():
+% op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="movsbl")
+
+%def op_iget_char():
+% op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="movzwl")
+
+%def op_iget_object():
+% op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
+ movzwl 2(rPC), %eax # eax <- field byte offset
+ movl %ecx, OUT_ARG0(%esp)
+ movl %eax, OUT_ARG1(%esp)
+ EXPORT_PC
+ call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
+ movl rSELF, %ecx
+ RESTORE_IBASE_FROM_SELF %ecx
+ cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+ jnz MterpException # bail out
+ andb $$0xf,rINSTbl # rINST <- A
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_quick(load="movl"):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
+ movzwl 2(rPC), %eax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ ${load} (%ecx,%eax,1), %eax
+ andb $$0xf,rINSTbl # rINST <- A
+ SET_VREG %eax, rINST # fp[A] <- value
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
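+    /*
+     * Worked example (hypothetical operands): for "iget-quick v1, v2, offset@0x0010"
+     * the BA byte is 0x21, so %ecx ends up holding vB == v2 (the object) and rINST
+     * holds A == 1; the default movl then loads the 32-bit field at 0x10(obj) into
+     * fp[1].
+     */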
+
+%def op_iget_short():
+% op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="movswl")
+
+%def op_iget_wide():
+% op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
+ movzwl 2(rPC), %eax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ movq (%ecx,%eax,1), %xmm0
+ andb $$0xf, rINSTbl # rINST <- A
+ SET_WIDE_FP_VREG %xmm0, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_instance_of():
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, %eax # eax <- BA
+ sarl $$4, %eax # eax <- B
+ leal VREG_ADDRESS(%eax), %ecx # Get object address
+ movl %ecx, OUT_ARG1(%esp)
+ movl OFF_FP_METHOD(rFP),%eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG3(%esp)
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
+ movl rSELF, %ecx
+ RESTORE_IBASE_FROM_SELF %ecx
+ cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+ jnz MterpException
+ andb $$0xf, rINSTbl # rINSTbl <- A
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_object():
+% op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ EXPORT_PC
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ movl rPC, OUT_ARG1(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG2(%esp)
+ call SYMBOL(MterpIputObjectQuick)
+ testb %al, %al
+ jz MterpException
+ RESTORE_IBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_quick(reg="rINST", store="movl"):
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINST # rINST <- v[A]
+ movzwl 2(rPC), %eax # eax <- field byte offset
+ ${store} ${reg}, (%ecx,%eax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ movzwl 2(rPC), %eax # eax<- field byte offset
+ leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
+ andb $$0xf, rINSTbl # rINST<- A
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
+ movq %xmm0, (%ecx) # obj.field<- r0/r1
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_new_instance():
+/*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG0(%esp)
+ movl rSELF, %ecx
+ movl %ecx, OUT_ARG1(%esp)
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_ARG2(%esp)
+ call SYMBOL(MterpNewInstance)
+ RESTORE_IBASE
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86/op_add_double.S b/runtime/interpreter/mterp/x86/op_add_double.S
deleted file mode 100644
index de2708f..0000000
--- a/runtime/interpreter/mterp/x86/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_add_double_2addr.S b/runtime/interpreter/mterp/x86/op_add_double_2addr.S
deleted file mode 100644
index 538c9ab..0000000
--- a/runtime/interpreter/mterp/x86/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_add_float.S b/runtime/interpreter/mterp/x86/op_add_float.S
deleted file mode 100644
index 80b1736..0000000
--- a/runtime/interpreter/mterp/x86/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_add_float_2addr.S b/runtime/interpreter/mterp/x86/op_add_float_2addr.S
deleted file mode 100644
index 6649253..0000000
--- a/runtime/interpreter/mterp/x86/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int.S b/runtime/interpreter/mterp/x86/op_add_int.S
deleted file mode 100644
index f71a56b..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"addl (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_2addr.S b/runtime/interpreter/mterp/x86/op_add_int_2addr.S
deleted file mode 100644
index 5d43b65..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"addl %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_lit16.S b/runtime/interpreter/mterp/x86/op_add_int_lit16.S
deleted file mode 100644
index 4f34d17..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_int_lit8.S b/runtime/interpreter/mterp/x86/op_add_int_lit8.S
deleted file mode 100644
index 3f14744..0000000
--- a/runtime/interpreter/mterp/x86/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_long.S b/runtime/interpreter/mterp/x86/op_add_long.S
deleted file mode 100644
index dce0c26..0000000
--- a/runtime/interpreter/mterp/x86/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"addl (rFP,%ecx,4), rIBASE", "instr2":"adcl 4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_add_long_2addr.S b/runtime/interpreter/mterp/x86/op_add_long_2addr.S
deleted file mode 100644
index 7847640..0000000
--- a/runtime/interpreter/mterp/x86/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"addl %eax, (rFP,rINST,4)","instr2":"adcl %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_aget.S b/runtime/interpreter/mterp/x86/op_aget.S
deleted file mode 100644
index 338386f..0000000
--- a/runtime/interpreter/mterp/x86/op_aget.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- $load $data_offset(%eax,%ecx,$shift), %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
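
(Note: the deleted op_aget template above is the shared body for the narrow array-get opcodes: null-check the array register, bounds-check the index against the array length field, then do a width-specific load from the data area. A minimal C++ sketch of that control flow, with std::vector and illustrative names standing in for ART's mirror array and MIRROR_*_OFFSET constants:)

#include <cstdint>
#include <vector>

// Sketch only, not ART code: three outcomes the handler distinguishes.
enum class AGetResult { kNullObject, kIndexOutOfBounds, kOk };

template <typename T>
AGetResult AGet(const std::vector<T>* array, uint32_t index, int32_t* out_vreg) {
  if (array == nullptr) {
    return AGetResult::kNullObject;                    // je common_errNullObject
  }
  if (index >= array->size()) {
    return AGetResult::kIndexOutOfBounds;              // jae common_errArrayIndex
  }
  *out_vreg = static_cast<int32_t>((*array)[index]);   // $load + SET_VREG
  return AGetResult::kOk;
}
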
diff --git a/runtime/interpreter/mterp/x86/op_aget_boolean.S b/runtime/interpreter/mterp/x86/op_aget_boolean.S
deleted file mode 100644
index d910c94..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_byte.S b/runtime/interpreter/mterp/x86/op_aget_byte.S
deleted file mode 100644
index aba9ffc..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_char.S b/runtime/interpreter/mterp/x86/op_aget_char.S
deleted file mode 100644
index 748e410..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_object.S b/runtime/interpreter/mterp/x86/op_aget_object.S
deleted file mode 100644
index 35ec053..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_object.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- EXPORT_PC
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_short.S b/runtime/interpreter/mterp/x86/op_aget_short.S
deleted file mode 100644
index 6eaf5d9..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aget_wide.S b/runtime/interpreter/mterp/x86/op_aget_wide.S
deleted file mode 100644
index 92c612a..0000000
--- a/runtime/interpreter/mterp/x86/op_aget_wide.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Array get, 64 bits. vAA <- vBB[vCC].
- */
- /* aget-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_and_int.S b/runtime/interpreter/mterp/x86/op_and_int.S
deleted file mode 100644
index 6272c4e..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"andl (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_2addr.S b/runtime/interpreter/mterp/x86/op_and_int_2addr.S
deleted file mode 100644
index 95df873..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"andl %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_lit16.S b/runtime/interpreter/mterp/x86/op_and_int_lit16.S
deleted file mode 100644
index b062064..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_int_lit8.S b/runtime/interpreter/mterp/x86/op_and_int_lit8.S
deleted file mode 100644
index 99915df..0000000
--- a/runtime/interpreter/mterp/x86/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_long.S b/runtime/interpreter/mterp/x86/op_and_long.S
deleted file mode 100644
index f8514ea..0000000
--- a/runtime/interpreter/mterp/x86/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"andl (rFP,%ecx,4), rIBASE", "instr2":"andl 4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_and_long_2addr.S b/runtime/interpreter/mterp/x86/op_and_long_2addr.S
deleted file mode 100644
index 37249b8..0000000
--- a/runtime/interpreter/mterp/x86/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"andl %eax, (rFP,rINST,4)","instr2":"andl %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_aput.S b/runtime/interpreter/mterp/x86/op_aput.S
deleted file mode 100644
index 9d8c52d..0000000
--- a/runtime/interpreter/mterp/x86/op_aput.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short
- *
- */
- /* op vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal $data_offset(%eax,%ecx,$shift), %eax
- GET_VREG rINST, rINST
- $store $reg, (%eax)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_boolean.S b/runtime/interpreter/mterp/x86/op_aput_boolean.S
deleted file mode 100644
index e7fdd53..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_byte.S b/runtime/interpreter/mterp/x86/op_aput_byte.S
deleted file mode 100644
index 491d03c..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_char.S b/runtime/interpreter/mterp/x86/op_aput_char.S
deleted file mode 100644
index ca42cf0..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_object.S b/runtime/interpreter/mterp/x86/op_aput_object.S
deleted file mode 100644
index 980b26a..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_object.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpAputObject) # (shadow_frame, pc, inst_data)
- RESTORE_IBASE
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_short.S b/runtime/interpreter/mterp/x86/op_aput_short.S
deleted file mode 100644
index 5e63482..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86/op_aput_wide.S b/runtime/interpreter/mterp/x86/op_aput_wide.S
deleted file mode 100644
index 43ef64a..0000000
--- a/runtime/interpreter/mterp/x86/op_aput_wide.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Array put, 64 bits. vBB[vCC] <- vAA.
- *
- */
- /* aput-wide vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB (array object)
- GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
- movq %xmm0, (%eax) # vBB[vCC] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_array_length.S b/runtime/interpreter/mterp/x86/op_array_length.S
deleted file mode 100644
index 60ed80b..0000000
--- a/runtime/interpreter/mterp/x86/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Return the length of an array.
- */
- mov rINST, %eax # eax <- BA
- sarl $$4, rINST # rINST <- B
- GET_VREG %ecx, rINST # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $$0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST, %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_check_cast.S b/runtime/interpreter/mterp/x86/op_check_cast.S
deleted file mode 100644
index d090aa3..0000000
--- a/runtime/interpreter/mterp/x86/op_check_cast.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- leal VREG_ADDRESS(rINST), %ecx
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_cmp_long.S b/runtime/interpreter/mterp/x86/op_cmp_long.S
deleted file mode 100644
index 1f729b0..0000000
--- a/runtime/interpreter/mterp/x86/op_cmp_long.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
- cmpl VREG_HIGH_ADDRESS(%ecx), %eax
- jl .L${opcode}_smaller
- jg .L${opcode}_bigger
- movzbl 2(rPC), %eax # eax <- BB, restore BB
- GET_VREG %eax, %eax # eax <- v[BB]
- sub VREG_ADDRESS(%ecx), %eax
- ja .L${opcode}_bigger
- jb .L${opcode}_smaller
-.L${opcode}_finish:
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-.L${opcode}_bigger:
- movl $$1, %eax
- jmp .L${opcode}_finish
-
-.L${opcode}_smaller:
- movl $$-1, %eax
- jmp .L${opcode}_finish
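
(For reference, the three-way comparison the deleted op_cmp_long handler performs — a signed compare of the high words, then an unsigned compare of the low words — can be sketched in C++ as follows; the function and variable names are illustrative only:)

#include <cstdint>

int32_t CmpLong(int64_t vbb, int64_t vcc) {
  int32_t hi_b = static_cast<int32_t>(vbb >> 32);
  int32_t hi_c = static_cast<int32_t>(vcc >> 32);
  if (hi_b < hi_c) return -1;                          // jl .L..._smaller
  if (hi_b > hi_c) return 1;                           // jg .L..._bigger
  uint32_t lo_b = static_cast<uint32_t>(vbb);
  uint32_t lo_c = static_cast<uint32_t>(vcc);
  if (lo_b > lo_c) return 1;                           // ja .L..._bigger
  if (lo_b < lo_c) return -1;                          // jb .L..._smaller
  return 0;                                            // equal: falls through with 0
}
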
diff --git a/runtime/interpreter/mterp/x86/op_cmpg_double.S b/runtime/interpreter/mterp/x86/op_cmpg_double.S
deleted file mode 100644
index a73ba55..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpg_float.S b/runtime/interpreter/mterp/x86/op_cmpg_float.S
deleted file mode 100644
index 648051b..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpl_double.S b/runtime/interpreter/mterp/x86/op_cmpl_double.S
deleted file mode 100644
index 058163e..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86/op_cmpl_float.S b/runtime/interpreter/mterp/x86/op_cmpl_float.S
deleted file mode 100644
index 302f078..0000000
--- a/runtime/interpreter/mterp/x86/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86/op_const.S b/runtime/interpreter/mterp/x86/op_const.S
deleted file mode 100644
index 544d63b..0000000
--- a/runtime/interpreter/mterp/x86/op_const.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINST # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_16.S b/runtime/interpreter/mterp/x86/op_const_16.S
deleted file mode 100644
index 97cd5fa..0000000
--- a/runtime/interpreter/mterp/x86/op_const_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINST # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_4.S b/runtime/interpreter/mterp/x86/op_const_4.S
deleted file mode 100644
index a60ba96..0000000
--- a/runtime/interpreter/mterp/x86/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/4 vA, #+B */
- movsx rINSTbl, %eax # eax <-ssssssBx
- movl $$0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $$4, %eax
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_const_class.S b/runtime/interpreter/mterp/x86/op_const_class.S
deleted file mode 100644
index 71648b5..0000000
--- a/runtime/interpreter/mterp/x86/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86/op_const_high16.S b/runtime/interpreter/mterp/x86/op_const_high16.S
deleted file mode 100644
index 576967a..0000000
--- a/runtime/interpreter/mterp/x86/op_const_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINST # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_method_handle.S b/runtime/interpreter/mterp/x86/op_const_method_handle.S
deleted file mode 100644
index 77948fd..0000000
--- a/runtime/interpreter/mterp/x86/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86/op_const_method_type.S b/runtime/interpreter/mterp/x86/op_const_method_type.S
deleted file mode 100644
index 03c6ce5..0000000
--- a/runtime/interpreter/mterp/x86/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86/op_const_string.S b/runtime/interpreter/mterp/x86/op_const_string.S
deleted file mode 100644
index 5553aab..0000000
--- a/runtime/interpreter/mterp/x86/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
deleted file mode 100644
index e7f952a..0000000
--- a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, OUT_ARG1(%esp)
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide.S b/runtime/interpreter/mterp/x86/op_const_wide.S
deleted file mode 100644
index 3750728..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movl 2(rPC), %eax # eax <- lsw
- movzbl rINSTbl, %ecx # ecx <- AA
- movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax, %ecx
- SET_VREG_HIGH rINST, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_16.S b/runtime/interpreter/mterp/x86/op_const_wide_16.S
deleted file mode 100644
index 1331c32..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_16.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const-wide/16 vAA, #+BBBB */
- movswl 2(rPC), %eax # eax <- ssssBBBB
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
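
(The cltd in the deleted const-wide/16 handler sign-extends the 16-bit literal, already widened into eax by movswl, into edx:eax, and the two halves are stored to vAA and vAA+1. A hedged C++ equivalent, names illustrative:)

#include <cstdint>

void ConstWide16(int16_t bbbb, int32_t* vreg_lo, int32_t* vreg_hi) {
  int64_t value = static_cast<int64_t>(bbbb);          // ssssssssssssBBBB
  *vreg_lo = static_cast<int32_t>(value);              // SET_VREG (lsw)
  *vreg_hi = static_cast<int32_t>(value >> 32);        // SET_VREG_HIGH (msw)
}
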
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_32.S b/runtime/interpreter/mterp/x86/op_const_wide_32.S
deleted file mode 100644
index ed7d62b..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_32.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* const-wide/32 vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # eax <- BBBBbbbb
- movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
- cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE, rINST # store msw
- SET_VREG %eax, rINST # store lsw
- movl %ecx, rIBASE # restore rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_high16.S b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
deleted file mode 100644
index 11b9310..0000000
--- a/runtime/interpreter/mterp/x86/op_const_wide_high16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
- xorl %eax, %eax
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_double.S b/runtime/interpreter/mterp/x86/op_div_double.S
deleted file mode 100644
index 575716d..0000000
--- a/runtime/interpreter/mterp/x86/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_div_double_2addr.S b/runtime/interpreter/mterp/x86/op_div_double_2addr.S
deleted file mode 100644
index 8229a31..0000000
--- a/runtime/interpreter/mterp/x86/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_div_float.S b/runtime/interpreter/mterp/x86/op_div_float.S
deleted file mode 100644
index 250f1dc..0000000
--- a/runtime/interpreter/mterp/x86/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_div_float_2addr.S b/runtime/interpreter/mterp/x86/op_div_float_2addr.S
deleted file mode 100644
index c30d148..0000000
--- a/runtime/interpreter/mterp/x86/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int.S b/runtime/interpreter/mterp/x86/op_div_int.S
deleted file mode 100644
index 5fc8fa5..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv.S" {"result":"%eax","special":"$0x80000000","rem":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_2addr.S b/runtime/interpreter/mterp/x86/op_div_int_2addr.S
deleted file mode 100644
index 04cf1ba..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv2addr.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_lit16.S b/runtime/interpreter/mterp/x86/op_div_int_lit16.S
deleted file mode 100644
index dd396bb..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit16.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_int_lit8.S b/runtime/interpreter/mterp/x86/op_div_int_lit8.S
deleted file mode 100644
index 3cbd9d0..0000000
--- a/runtime/interpreter/mterp/x86/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit8.S" {"result":"%eax","special":"$0x80000000"}
diff --git a/runtime/interpreter/mterp/x86/op_div_long.S b/runtime/interpreter/mterp/x86/op_div_long.S
deleted file mode 100644
index e56a035..0000000
--- a/runtime/interpreter/mterp/x86/op_div_long.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default {"routine":"art_quick_ldiv"}
-/* art_quick_* methods have the quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div vAA, vBB, vCC */
- .extern $routine
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %ecx, %edx
- orl %ebx, %ecx
- jz common_errDivideByZero
- movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx, %eax
- GET_VREG %eax, %eax
- call SYMBOL($routine)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
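
(Besides marshalling registers for the call — which per the template's %default goes to art_quick_ldiv — the deleted op_div_long handler only performs the divide-by-zero check: it ORs the two 32-bit halves of vCC and bails to common_errDivideByZero when the result is zero. A C++ sketch of that check, ignoring the INT64_MIN / -1 overflow case that the runtime helper handles:)

#include <cstdint>

bool DivLong(int64_t dividend, int64_t divisor, int64_t* out) {
  uint32_t lo = static_cast<uint32_t>(divisor);
  uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(divisor) >> 32);
  if ((lo | hi) == 0) {
    return false;                                      // jz common_errDivideByZero
  }
  *out = dividend / divisor;                           // what art_quick_ldiv computes
  return true;
}
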
diff --git a/runtime/interpreter/mterp/x86/op_div_long_2addr.S b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
deleted file mode 100644
index 159cc44..0000000
--- a/runtime/interpreter/mterp/x86/op_div_long_2addr.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default {"routine":"art_quick_ldiv"}
-/* art_quick_* methods have the quick ABI,
- * so use eax, ecx, edx, ebx for args
- */
- /* div/2addr vA, vB */
- .extern $routine
- mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
- movzbl rINSTbl, %eax
- shrl $$4, %eax # eax <- B
- andb $$0xf, rINSTbl # rINST <- A
- mov rINST, LOCAL1(%esp) # save rINST/%ebx
- movl %ebx, %ecx
- GET_VREG %edx, %eax
- GET_VREG_HIGH %ebx, %eax
- movl %edx, %eax
- orl %ebx, %eax
- jz common_errDivideByZero
- GET_VREG %eax, %ecx
- GET_VREG_HIGH %ecx, %ecx
- call SYMBOL($routine)
- mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE, rINST
- SET_VREG %eax, rINST
- mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_double_to_float.S b/runtime/interpreter/mterp/x86/op_double_to_float.S
deleted file mode 100644
index 5135d60..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fldl","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_double_to_int.S b/runtime/interpreter/mterp/x86/op_double_to_int.S
deleted file mode 100644
index 9c4e11c..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"1","tgtlong":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_double_to_long.S b/runtime/interpreter/mterp/x86/op_double_to_long.S
deleted file mode 100644
index fe0eee2..0000000
--- a/runtime/interpreter/mterp/x86/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"1","tgtlong":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_fill_array_data.S b/runtime/interpreter/mterp/x86/op_fill_array_data.S
deleted file mode 100644
index 5855284..0000000
--- a/runtime/interpreter/mterp/x86/op_fill_array_data.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax, rINST # eax <- vAA (array object)
- movl %eax, OUT_ARG0(%esp)
- movl %ecx, OUT_ARG1(%esp)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array.S b/runtime/interpreter/mterp/x86/op_filled_new_array.S
deleted file mode 100644
index 35b2fe8..0000000
--- a/runtime/interpreter/mterp/x86/op_filled_new_array.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp)
- call SYMBOL($helper)
- REFRESH_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86/op_filled_new_array_range.S
deleted file mode 100644
index 841059e..0000000
--- a/runtime/interpreter/mterp/x86/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86/op_float_to_double.S b/runtime/interpreter/mterp/x86/op_float_to_double.S
deleted file mode 100644
index 12a3e14..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"flds","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_float_to_int.S b/runtime/interpreter/mterp/x86/op_float_to_int.S
deleted file mode 100644
index ac57388..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"0","tgtlong":"0"}
diff --git a/runtime/interpreter/mterp/x86/op_float_to_long.S b/runtime/interpreter/mterp/x86/op_float_to_long.S
deleted file mode 100644
index be1d982..0000000
--- a/runtime/interpreter/mterp/x86/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/cvtfp_int.S" {"srcdouble":"0","tgtlong":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_goto.S b/runtime/interpreter/mterp/x86/op_goto.S
deleted file mode 100644
index 1827d68..0000000
--- a/runtime/interpreter/mterp/x86/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbl rINSTbl, rINST # rINST <- ssssssAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_goto_16.S b/runtime/interpreter/mterp/x86/op_goto_16.S
deleted file mode 100644
index ea5ea90..0000000
--- a/runtime/interpreter/mterp/x86/op_goto_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswl 2(rPC), rINST # rINST <- ssssAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_goto_32.S b/runtime/interpreter/mterp/x86/op_goto_32.S
deleted file mode 100644
index 4becaf3..0000000
--- a/runtime/interpreter/mterp/x86/op_goto_32.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Unlike most opcodes, this one is allowed to branch to itself, so
- * our "backward branch" test must be "<=0" instead of "<0". Because
- * we need the V bit set, we'll use an adds to convert from Dalvik
- * offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movl 2(rPC), rINST # rINST <- AAAAAAAA
- testl rINST, rINST
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86/op_if_eq.S b/runtime/interpreter/mterp/x86/op_if_eq.S
deleted file mode 100644
index 5413d98..0000000
--- a/runtime/interpreter/mterp/x86/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86/op_if_eqz.S b/runtime/interpreter/mterp/x86/op_if_eqz.S
deleted file mode 100644
index 53dc99e..0000000
--- a/runtime/interpreter/mterp/x86/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ge.S b/runtime/interpreter/mterp/x86/op_if_ge.S
deleted file mode 100644
index c2ba3c6..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gez.S b/runtime/interpreter/mterp/x86/op_if_gez.S
deleted file mode 100644
index cd2c772..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gt.S b/runtime/interpreter/mterp/x86/op_if_gt.S
deleted file mode 100644
index 9fe84bb..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86/op_if_gtz.S b/runtime/interpreter/mterp/x86/op_if_gtz.S
deleted file mode 100644
index b454ffd..0000000
--- a/runtime/interpreter/mterp/x86/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86/op_if_le.S b/runtime/interpreter/mterp/x86/op_if_le.S
deleted file mode 100644
index 93571a7..0000000
--- a/runtime/interpreter/mterp/x86/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86/op_if_lez.S b/runtime/interpreter/mterp/x86/op_if_lez.S
deleted file mode 100644
index 779c77f..0000000
--- a/runtime/interpreter/mterp/x86/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86/op_if_lt.S b/runtime/interpreter/mterp/x86/op_if_lt.S
deleted file mode 100644
index 1fb1521..0000000
--- a/runtime/interpreter/mterp/x86/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ltz.S b/runtime/interpreter/mterp/x86/op_if_ltz.S
deleted file mode 100644
index 155c356..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86/op_if_ne.S b/runtime/interpreter/mterp/x86/op_if_ne.S
deleted file mode 100644
index 7e1b065..0000000
--- a/runtime/interpreter/mterp/x86/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86/op_if_nez.S b/runtime/interpreter/mterp/x86/op_if_nez.S
deleted file mode 100644
index 8951f5b..0000000
--- a/runtime/interpreter/mterp/x86/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
deleted file mode 100644
index 0af1bec..0000000
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ /dev/null
@@ -1,29 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32"}
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL($helper)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $$0xf, rINSTbl # rINST <- A
- .if $is_object
- SET_VREG_OBJECT %eax, rINST # fp[A] <-value
- .else
- SET_VREG %eax, rINST # fp[A] <-value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean.S b/runtime/interpreter/mterp/x86/op_iget_boolean.S
deleted file mode 100644
index ddccc41..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S
deleted file mode 100644
index 02b0c16..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte.S b/runtime/interpreter/mterp/x86/op_iget_byte.S
deleted file mode 100644
index cd46d3d..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86/op_iget_byte_quick.S
deleted file mode 100644
index 02b0c16..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char.S b/runtime/interpreter/mterp/x86/op_iget_char.S
deleted file mode 100644
index 9969734..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_char_quick.S b/runtime/interpreter/mterp/x86/op_iget_char_quick.S
deleted file mode 100644
index a5d9712..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object.S b/runtime/interpreter/mterp/x86/op_iget_object.S
deleted file mode 100644
index 3d421fc..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_object_quick.S b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
deleted file mode 100644
index b1551a0..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_object_quick.S
+++ /dev/null
@@ -1,17 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- movl %ecx, OUT_ARG0(%esp)
- movl %eax, OUT_ARG1(%esp)
- EXPORT_PC
- call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $$0xf,rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_quick.S b/runtime/interpreter/mterp/x86/op_iget_quick.S
deleted file mode 100644
index 1b7440f..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "load":"movl"}
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- ${load} (%ecx,%eax,1), %eax
- andb $$0xf,rINSTbl # rINST <- A
- SET_VREG %eax, rINST # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
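
(The quickened iget variants above skip field resolution entirely: offset@CCCC in the instruction is already the field's byte offset inside the object, so the handler only needs a null check before a width-specific load. A rough C++ sketch with a hypothetical helper, not ART's accessor:)

#include <cstdint>
#include <cstring>

template <typename T>
bool IGetQuick(const void* object, uint32_t byte_offset, int32_t* out_vreg) {
  if (object == nullptr) {
    return false;                                      // je common_errNullObject
  }
  T value;
  std::memcpy(&value,
              static_cast<const uint8_t*>(object) + byte_offset,
              sizeof(T));                              // ${load} (%ecx,%eax,1), %eax
  *out_vreg = static_cast<int32_t>(value);             // SET_VREG %eax, rINST
  return true;
}
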
diff --git a/runtime/interpreter/mterp/x86/op_iget_short.S b/runtime/interpreter/mterp/x86/op_iget_short.S
deleted file mode 100644
index c7477f5..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_short_quick.S b/runtime/interpreter/mterp/x86/op_iget_short_quick.S
deleted file mode 100644
index 2c3aeb6..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
deleted file mode 100644
index da27df9..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * 64-bit instance field get.
- *
- * for: iget-wide
- */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- mov rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpIGetU64)
- mov rSELF, %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException # bail out
- andb $$0xf, rINSTbl # rINST <- A
- SET_VREG %eax, rINST
- SET_VREG_HIGH %edx, rINST
- RESTORE_IBASE_FROM_SELF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
deleted file mode 100644
index 7ce74cc..0000000
--- a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* iget-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- movzwl 2(rPC), %eax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movq (%ecx,%eax,1), %xmm0
- andb $$0xf, rINSTbl # rINST <- A
- SET_WIDE_FP_VREG %xmm0, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_instance_of.S b/runtime/interpreter/mterp/x86/op_instance_of.S
deleted file mode 100644
index e6fe5b2..0000000
--- a/runtime/interpreter/mterp/x86/op_instance_of.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- BBBB
- movl %eax, OUT_ARG0(%esp)
- movl rINST, %eax # eax <- BA
- sarl $$4, %eax # eax <- B
- leal VREG_ADDRESS(%eax), %ecx # Get object address
- movl %ecx, OUT_ARG1(%esp)
- movl OFF_FP_METHOD(rFP),%eax
- movl %eax, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- andb $$0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_int_to_byte.S b/runtime/interpreter/mterp/x86/op_int_to_byte.S
deleted file mode 100644
index b4e8d22..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movsbl %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_char.S b/runtime/interpreter/mterp/x86/op_int_to_char.S
deleted file mode 100644
index 4608971..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movzwl %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_double.S b/runtime/interpreter/mterp/x86/op_int_to_double.S
deleted file mode 100644
index 3e9921e..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildl","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_float.S b/runtime/interpreter/mterp/x86/op_int_to_float.S
deleted file mode 100644
index 849540d..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildl","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_int_to_long.S b/runtime/interpreter/mterp/x86/op_int_to_long.S
deleted file mode 100644
index 6f9ea26..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_long.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* int to long vA, vB */
- movzbl rINSTbl, %eax # eax <- +A
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- andb $$0xf, rINSTbl # rINST <- A
- movl rIBASE, %ecx # cltd trashes rIBASE/edx
- cltd # rINST:eax<- sssssssBBBBBBBB
- SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
- SET_VREG %eax, rINST # v[A+0] <- %eax
- movl %ecx, rIBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
diff --git a/runtime/interpreter/mterp/x86/op_int_to_short.S b/runtime/interpreter/mterp/x86/op_int_to_short.S
deleted file mode 100644
index 90d0ae6..0000000
--- a/runtime/interpreter/mterp/x86/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom.S b/runtime/interpreter/mterp/x86/op_invoke_custom.S
deleted file mode 100644
index eddd5b3..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
deleted file mode 100644
index 1a4e884..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_direct.S b/runtime/interpreter/mterp/x86/op_invoke_direct.S
deleted file mode 100644
index 76fb9a6..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86/op_invoke_direct_range.S
deleted file mode 100644
index a6ab604..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_interface.S b/runtime/interpreter/mterp/x86/op_invoke_interface.S
deleted file mode 100644
index 91c24f5..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeInterface" }
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86/op_invoke_interface_range.S
deleted file mode 100644
index e478beb..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
deleted file mode 100644
index 3907689..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
deleted file mode 100644
index 59a8230..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_static.S b/runtime/interpreter/mterp/x86/op_invoke_static.S
deleted file mode 100644
index b4c1236..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/x86/op_invoke_static_range.S b/runtime/interpreter/mterp/x86/op_invoke_static_range.S
deleted file mode 100644
index 3dc8a26..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_super.S b/runtime/interpreter/mterp/x86/op_invoke_super.S
deleted file mode 100644
index be20edd..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeSuper" }
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_super_range.S b/runtime/interpreter/mterp/x86/op_invoke_super_range.S
deleted file mode 100644
index f36bf72..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual.S b/runtime/interpreter/mterp/x86/op_invoke_virtual.S
deleted file mode 100644
index 7e9c456..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtual" }
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S
deleted file mode 100644
index 2dc9ab6..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S
deleted file mode 100644
index d1d20d2..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 21bfc55..0000000
--- a/runtime/interpreter/mterp/x86/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86/op_iput.S b/runtime/interpreter/mterp/x86/op_iput.S
deleted file mode 100644
index 4c6603a..0000000
--- a/runtime/interpreter/mterp/x86/op_iput.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default { "helper":"MterpIPutU32" }
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), %eax # eax<- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $$4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $$0xf, rINSTbl # rINST<- A
- GET_VREG %eax, rINST
- movl %eax, OUT_ARG2(%esp) # fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL($helper)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_boolean.S b/runtime/interpreter/mterp/x86/op_iput_boolean.S
deleted file mode 100644
index fdd5303..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S
deleted file mode 100644
index 93865de..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_byte.S b/runtime/interpreter/mterp/x86/op_iput_byte.S
deleted file mode 100644
index b81850c..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86/op_iput_byte_quick.S
deleted file mode 100644
index 93865de..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_char.S b/runtime/interpreter/mterp/x86/op_iput_char.S
deleted file mode 100644
index dde3853..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_char_quick.S b/runtime/interpreter/mterp/x86/op_iput_char_quick.S
deleted file mode 100644
index 4ec8029..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_object.S b/runtime/interpreter/mterp/x86/op_iput_object.S
deleted file mode 100644
index 56e026e..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG3(%esp)
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_object_quick.S b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
deleted file mode 100644
index cb77929..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_object_quick.S
+++ /dev/null
@@ -1,11 +0,0 @@
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_quick.S b/runtime/interpreter/mterp/x86/op_iput_quick.S
deleted file mode 100644
index b67cee0..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "reg":"rINST", "store":"movl" }
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINST # rINST <- v[A]
- movzwl 2(rPC), %eax # eax <- field byte offset
- ${store} ${reg}, (%ecx,%eax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_short.S b/runtime/interpreter/mterp/x86/op_iput_short.S
deleted file mode 100644
index 130e875..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_short_quick.S b/runtime/interpreter/mterp/x86/op_iput_short_quick.S
deleted file mode 100644
index 4ec8029..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide.S b/runtime/interpreter/mterp/x86/op_iput_wide.S
deleted file mode 100644
index ea22b91..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_wide.S
+++ /dev/null
@@ -1,19 +0,0 @@
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- movzwl 2(rPC), %eax # eax <- 0000CCCC
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movzbl rINSTbl,%ecx # ecx <- BA
- sarl $$4,%ecx # ecx <- B
- GET_VREG %ecx, %ecx
- movl %ecx, OUT_ARG1(%esp) # the object pointer
- andb $$0xf,rINSTbl # rINST <- A
- leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG2(%esp) # &fp[A]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG3(%esp) # referrer
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jnz MterpPossibleException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
deleted file mode 100644
index 17de6f8..0000000
--- a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbl rINSTbl, %ecx # ecx<- BA
- sarl $$4, %ecx # ecx<- B
- GET_VREG %ecx, %ecx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwl 2(rPC), %eax # eax<- field byte offset
- leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
- andb $$0xf, rINSTbl # rINST<- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
- movq %xmm0, (%ecx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_long_to_double.S b/runtime/interpreter/mterp/x86/op_long_to_double.S
deleted file mode 100644
index 2c7f905..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildll","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_long_to_float.S b/runtime/interpreter/mterp/x86/op_long_to_float.S
deleted file mode 100644
index e500e39..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"load":"fildll","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_long_to_int.S b/runtime/interpreter/mterp/x86/op_long_to_int.S
deleted file mode 100644
index 1c39b96..0000000
--- a/runtime/interpreter/mterp/x86/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "x86/op_move.S"
diff --git a/runtime/interpreter/mterp/x86/op_monitor_enter.S b/runtime/interpreter/mterp/x86/op_monitor_enter.S
deleted file mode 100644
index b35c684..0000000
--- a/runtime/interpreter/mterp/x86/op_monitor_enter.S
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artLockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_monitor_exit.S b/runtime/interpreter/mterp/x86/op_monitor_exit.S
deleted file mode 100644
index 2d17d5e..0000000
--- a/runtime/interpreter/mterp/x86/op_monitor_exit.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG %ecx, rINST
- movl %ecx, OUT_ARG0(%esp)
- movl rSELF, %eax
- movl %eax, OUT_ARG1(%esp)
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- RESTORE_IBASE
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move.S b/runtime/interpreter/mterp/x86/op_move.S
deleted file mode 100644
index ea173b9..0000000
--- a/runtime/interpreter/mterp/x86/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $$0xf, %al # eax <- A
- shrl $$4, rINST # rINST <- B
- GET_VREG rINST, rINST
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
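
(The deleted op_move template also documents the common A|B nibble decoding used by the 4-bit register formats: the low nibble of the instruction byte is the destination vA, the high nibble the source vB. Sketched in C++ with illustrative names only:)

#include <cstdint>

void Move(uint8_t inst_ba, int32_t* vregs) {
  uint32_t a = inst_ba & 0x0fu;                        // andb $0xf, %al
  uint32_t b = inst_ba >> 4;                           // shrl $4, rINST
  vregs[a] = vregs[b];                                 // fp[A] <- fp[B]
}
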
diff --git a/runtime/interpreter/mterp/x86/op_move_16.S b/runtime/interpreter/mterp/x86/op_move_16.S
deleted file mode 100644
index 454deb5..0000000
--- a/runtime/interpreter/mterp/x86/op_move_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwl 4(rPC), %ecx # ecx <- BBBB
- movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST, %ecx
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_exception.S b/runtime/interpreter/mterp/x86/op_move_exception.S
deleted file mode 100644
index d8dc74f..0000000
--- a/runtime/interpreter/mterp/x86/op_move_exception.S
+++ /dev/null
@@ -1,6 +0,0 @@
- /* move-exception vAA */
- movl rSELF, %ecx
- movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
- movl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_from16.S b/runtime/interpreter/mterp/x86/op_move_from16.S
deleted file mode 100644
index e869855..0000000
--- a/runtime/interpreter/mterp/x86/op_move_from16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzx rINSTbl, %eax # eax <- AA
- movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST, rINST # rINST <- fp[BBBB]
- .if $is_object
- SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
- .else
- SET_VREG rINST, %eax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_move_object.S b/runtime/interpreter/mterp/x86/op_move_object.S
deleted file mode 100644
index a6a7c90..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_object_16.S b/runtime/interpreter/mterp/x86/op_move_object_16.S
deleted file mode 100644
index e0c8527..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_object_from16.S b/runtime/interpreter/mterp/x86/op_move_object_from16.S
deleted file mode 100644
index e623820..0000000
--- a/runtime/interpreter/mterp/x86/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_result.S b/runtime/interpreter/mterp/x86/op_move_result.S
deleted file mode 100644
index f6f2129..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl (%eax), %eax # r0 <- result.i.
- .if $is_object
- SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINST # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_result_object.S b/runtime/interpreter/mterp/x86/op_move_result_object.S
deleted file mode 100644
index cbf5e1d..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_move_result_wide.S b/runtime/interpreter/mterp/x86/op_move_result_wide.S
deleted file mode 100644
index 7818cce..0000000
--- a/runtime/interpreter/mterp/x86/op_move_result_wide.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* move-result-wide vAA */
- movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
- movl 4(%eax), %ecx # Get high
- movl (%eax), %eax # Get low
- SET_VREG %eax, rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide.S b/runtime/interpreter/mterp/x86/op_move_wide.S
deleted file mode 100644
index 79ce7b7..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_16.S b/runtime/interpreter/mterp/x86/op_move_wide_16.S
deleted file mode 100644
index a6b8596..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 4(rPC), %ecx # ecx<- BBBB
- movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_from16.S b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
deleted file mode 100644
index ec344de..0000000
--- a/runtime/interpreter/mterp/x86/op_move_wide_from16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_double.S b/runtime/interpreter/mterp/x86/op_mul_double.S
deleted file mode 100644
index 7cef4c0..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86/op_mul_double_2addr.S
deleted file mode 100644
index bb722b6..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_float.S b/runtime/interpreter/mterp/x86/op_mul_float.S
deleted file mode 100644
index 1156230..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86/op_mul_float_2addr.S
deleted file mode 100644
index e9316df..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_mul_int.S b/runtime/interpreter/mterp/x86/op_mul_int.S
deleted file mode 100644
index 77f4659..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /*
- * 32-bit binary multiplication.
- */
- /* mul vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax, %eax # eax <- vBB
- mov rIBASE, LOCAL0(%esp)
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- mov LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
deleted file mode 100644
index da699ae..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /* mul vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $$0xf, %cl # ecx <- A
- movl rIBASE, rINST
- imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
- movl rINST, rIBASE
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
deleted file mode 100644
index 056f491..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* mul/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movzbl rINSTbl, %eax # eax <- 000000BA
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %eax # eax <- vB
- movl rIBASE, %ecx
- movswl 2(rPC), rIBASE # rIBASE <- ssssCCCC
- andb $$0xf, rINSTbl # rINST <- A
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
deleted file mode 100644
index 59b3844..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* mul/lit8 vAA, vBB, #+CC */
- movzbl 2(rPC), %eax # eax <- BB
- movl rIBASE, %ecx
- GET_VREG %eax, %eax # eax <- rBB
- movsbl 3(rPC), rIBASE # rIBASE <- ssssssCC
- imull rIBASE, %eax # trashes rIBASE/edx
- movl %ecx, rIBASE
- SET_VREG %eax, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_long.S b/runtime/interpreter/mterp/x86/op_mul_long.S
deleted file mode 100644
index f35ca13..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_long.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Signed 64-bit integer multiply.
- *
- * We could definitely use more free registers for
- * this code. We spill rINSTw (ebx),
- * giving us eax, ebx, ecx and edx as computational
- * temps. On top of that, we'll spill edi (rFP)
- * for use as the vB pointer and esi (rPC) for use
- * as the vC pointer. Yuck.
- *
- */
- /* mul-long vAA, vBB, vCC */
- movzbl 2(rPC), %eax # eax <- B
- movzbl 3(rPC), %ecx # ecx <- C
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[B]
- leal (rFP,%ecx,4), rFP # rFP <- &v[C]
- movl 4(%esi), %ecx # ecx <- Bmsw
- imull (rFP), %ecx # ecx <- (Bmsw*Clsw)
- movl 4(rFP), %eax # eax <- Cmsw
- imull (%esi), %eax # eax <- (Cmsw*Blsw)
- addl %eax, %ecx # ecx <- (Bmsw*Clsw)+(Cmsw*Blsw)
- movl (rFP), %eax # eax <- Clsw
- mull (%esi) # eax <- (Clsw*Alsw)
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL1(%esp), rFP # restore FP
- leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
- SET_VREG_HIGH rIBASE, rINST # v[B+1] <- rIBASE
- mov LOCAL2(%esp), rIBASE # restore IBASE
- SET_VREG %eax, rINST # v[B] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
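The deleted mul-long handler above relies on the usual schoolbook decomposition of a 64-bit product into 32-bit partial products; since only the low 64 bits are kept, the high*high term can be dropped. A minimal C sketch of the same arithmetic (illustrative only; the function name and layout are not taken from this patch):

#include <stdint.h>

/* Same decomposition as the assembly: cross terms folded into the high
 * word, unsigned 32x32->64 multiply of the low words, then recombine. */
static int64_t mul_long_sketch(int64_t b, int64_t c) {
    uint32_t b_lo = (uint32_t)b, c_lo = (uint32_t)c;
    uint32_t b_hi = (uint32_t)((uint64_t)b >> 32);
    uint32_t c_hi = (uint32_t)((uint64_t)c >> 32);

    uint32_t cross = b_hi * c_lo + c_hi * b_lo;      /* (Bmsw*Clsw)+(Cmsw*Blsw) */
    uint64_t low   = (uint64_t)b_lo * c_lo;          /* mull: edx:eax <- Blsw*Clsw */
    uint32_t hi    = (uint32_t)(low >> 32) + cross;  /* leal (%ecx,rIBASE), rIBASE */

    return (int64_t)(((uint64_t)hi << 32) | (uint32_t)low);
}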
diff --git a/runtime/interpreter/mterp/x86/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86/op_mul_long_2addr.S
deleted file mode 100644
index 565a57c..0000000
--- a/runtime/interpreter/mterp/x86/op_mul_long_2addr.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Signed 64-bit integer multiply, 2-addr version
- *
- * We could definitely use more free registers for
- * this code. We must spill %edx (rIBASE) because it
- * is used by imul. We'll also spill rINST (ebx),
- * giving us eax, ebx, ecx and rIBASE as computational
- * temps. On top of that, we'll spill %esi (edi)
- * for use as the vA pointer and rFP (esi) for use
- * as the vB pointer. Yuck.
- */
- /* mul-long/2addr vA, vB */
- movzbl rINSTbl, %eax # eax <- BA
- andb $$0xf, %al # eax <- A
- CLEAR_WIDE_REF %eax # clear refs in advance
- sarl $$4, rINST # rINST <- B
- mov rPC, LOCAL0(%esp) # save Interpreter PC
- mov rFP, LOCAL1(%esp) # save FP
- mov rIBASE, LOCAL2(%esp) # save rIBASE
- leal (rFP,%eax,4), %esi # esi <- &v[A]
- leal (rFP,rINST,4), rFP # rFP <- &v[B]
- movl 4(%esi), %ecx # ecx <- Amsw
- imull (rFP), %ecx # ecx <- (Amsw*Blsw)
- movl 4(rFP), %eax # eax <- Bmsw
- imull (%esi), %eax # eax <- (Bmsw*Alsw)
- addl %eax, %ecx # ecx <- (Amsw*Blsw)+(Bmsw*Alsw)
- movl (rFP), %eax # eax <- Blsw
- mull (%esi) # eax <- (Blsw*Alsw)
- leal (%ecx,rIBASE), rIBASE # full result now in %edx:%eax
- movl rIBASE, 4(%esi) # v[A+1] <- rIBASE
- movl %eax, (%esi) # v[A] <- %eax
- mov LOCAL0(%esp), rPC # restore Interpreter PC
- mov LOCAL2(%esp), rIBASE # restore IBASE
- mov LOCAL1(%esp), rFP # restore FP
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_neg_double.S b/runtime/interpreter/mterp/x86/op_neg_double.S
deleted file mode 100644
index fac4322..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"instr":"fchs","load":"fldl","store":"fstpl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_float.S b/runtime/interpreter/mterp/x86/op_neg_float.S
deleted file mode 100644
index 30f071b..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/fpcvt.S" {"instr":"fchs","load":"flds","store":"fstps"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_int.S b/runtime/interpreter/mterp/x86/op_neg_int.S
deleted file mode 100644
index 67d4d18..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"negl %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_neg_long.S b/runtime/interpreter/mterp/x86/op_neg_long.S
deleted file mode 100644
index 30da247..0000000
--- a/runtime/interpreter/mterp/x86/op_neg_long.S
+++ /dev/null
@@ -1,13 +0,0 @@
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- negl %eax
- adcl $$0, %ecx
- negl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
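The neg-long handler above negates the 64-bit value one 32-bit half at a time, propagating the borrow with adcl. A C sketch of the equivalent computation (illustrative only, not part of the patch):

#include <stdint.h>

static int64_t neg_long_sketch(int64_t v) {
    uint32_t lo = (uint32_t)v;
    uint32_t hi = (uint32_t)((uint64_t)v >> 32);
    uint32_t new_lo = 0u - lo;                  /* negl %eax (sets CF if lo != 0) */
    uint32_t new_hi = 0u - (hi + (lo != 0u));   /* adcl $0, %ecx ; negl %ecx      */
    return (int64_t)(((uint64_t)new_hi << 32) | new_lo);
}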
diff --git a/runtime/interpreter/mterp/x86/op_new_array.S b/runtime/interpreter/mterp/x86/op_new_array.S
deleted file mode 100644
index 16226e9..0000000
--- a/runtime/interpreter/mterp/x86/op_new_array.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpNewArray)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_new_instance.S b/runtime/interpreter/mterp/x86/op_new_instance.S
deleted file mode 100644
index f976acc..0000000
--- a/runtime/interpreter/mterp/x86/op_new_instance.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- call SYMBOL(MterpNewInstance)
- RESTORE_IBASE
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_nop.S b/runtime/interpreter/mterp/x86/op_nop.S
deleted file mode 100644
index 4cb68e3..0000000
--- a/runtime/interpreter/mterp/x86/op_nop.S
+++ /dev/null
@@ -1 +0,0 @@
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_not_int.S b/runtime/interpreter/mterp/x86/op_not_int.S
deleted file mode 100644
index 335ab09..0000000
--- a/runtime/interpreter/mterp/x86/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unop.S" {"instr":"notl %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_not_long.S b/runtime/interpreter/mterp/x86/op_not_long.S
deleted file mode 100644
index 8f706e1..0000000
--- a/runtime/interpreter/mterp/x86/op_not_long.S
+++ /dev/null
@@ -1,11 +0,0 @@
- /* unop vA, vB */
- movzbl rINSTbl, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
- notl %eax
- notl %ecx
- SET_VREG %eax, rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_or_int.S b/runtime/interpreter/mterp/x86/op_or_int.S
deleted file mode 100644
index ebe2ec2..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"orl (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_2addr.S b/runtime/interpreter/mterp/x86/op_or_int_2addr.S
deleted file mode 100644
index 36c17db..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"orl %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_lit16.S b/runtime/interpreter/mterp/x86/op_or_int_lit16.S
deleted file mode 100644
index 0a88ff59..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_int_lit8.S b/runtime/interpreter/mterp/x86/op_or_int_lit8.S
deleted file mode 100644
index 0670b67..0000000
--- a/runtime/interpreter/mterp/x86/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_long.S b/runtime/interpreter/mterp/x86/op_or_long.S
deleted file mode 100644
index 09ca539..0000000
--- a/runtime/interpreter/mterp/x86/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"orl (rFP,%ecx,4), rIBASE", "instr2":"orl 4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_or_long_2addr.S b/runtime/interpreter/mterp/x86/op_or_long_2addr.S
deleted file mode 100644
index 2062e81..0000000
--- a/runtime/interpreter/mterp/x86/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"orl %eax, (rFP,rINST,4)","instr2":"orl %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_packed_switch.S b/runtime/interpreter/mterp/x86/op_packed_switch.S
deleted file mode 100644
index fcb7509..0000000
--- a/runtime/interpreter/mterp/x86/op_packed_switch.S
+++ /dev/null
@@ -1,21 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax, rINST # eax <- vAA
- leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
- movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call SYMBOL($func)
- REFRESH_IBASE
- testl %eax, %eax
- movl %eax, rINST
- jmp MterpCommonTakenBranch
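The packed-switch/sparse-switch handler above only computes the payload address (rPC + BBBBbbbb*2) and the tested register value, then leaves the table lookup to a C helper. As a rough illustration of what a MterpDoPackedSwitch-style helper does with the standard packed-switch-payload layout (the function name and exact signature here are assumptions, not taken from this patch):

#include <stdint.h>
#include <string.h>

/* Payload layout: ident (0x0100), size, 32-bit first_key, then 'size'
 * 32-bit branch offsets relative to the switch instruction, expressed
 * in 16-bit code units. */
static int32_t packed_switch_sketch(const uint16_t* payload, int32_t test_val) {
    uint16_t size = payload[1];
    int32_t first_key;
    memcpy(&first_key, &payload[2], sizeof(first_key));

    uint32_t index = (uint32_t)test_val - (uint32_t)first_key;
    if (index >= size) {
        return 3;  /* no match: fall through past the 3-code-unit instruction */
    }
    int32_t target;
    memcpy(&target, (const unsigned char*)&payload[4] + 4u * index, sizeof(target));
    return target;
}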
diff --git a/runtime/interpreter/mterp/x86/op_rem_double.S b/runtime/interpreter/mterp/x86/op_rem_double.S
deleted file mode 100644
index 4b52a06..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_double.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* rem_double vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- BB
- movzbl 2(rPC), %eax # eax <- CC
- fldl VREG_ADDRESS(%ecx) # %st1 <- fp[vBB]
- fldl VREG_ADDRESS(%eax) # %st0 <- fp[vCC]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINST) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86/op_rem_double_2addr.S
deleted file mode 100644
index 5a0e669..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_double_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* rem_double/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINST) # vB to fp stack
- andb $$0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_WIDE_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_rem_float.S b/runtime/interpreter/mterp/x86/op_rem_float.S
deleted file mode 100644
index 05e0bf1..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_float.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* rem_float vAA, vBB, vCC */
- movzbl 3(rPC), %ecx # ecx <- BB
- movzbl 2(rPC), %eax # eax <- CC
- flds VREG_ADDRESS(%ecx) # vBB to fp stack
- flds VREG_ADDRESS(%eax) # vCC to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINST) # %st to vAA
- CLEAR_REF rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86/op_rem_float_2addr.S
deleted file mode 100644
index 29f84e6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_float_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* rem_float/2addr vA, vB */
- movzx rINSTbl, %ecx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- flds VREG_ADDRESS(rINST) # vB to fp stack
- andb $$0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%ecx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%ecx) # %st to vA
- CLEAR_REF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
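The four rem_double/rem_float handlers above all use the same fprem loop: fprem produces only a partial remainder, so the handlers poll the FPU C2 status bit (fstsw / sahf / jp) and retry until the reduction completes. The value they compute is the floating-point remainder with the sign of the dividend, i.e. what C's fmod/fmodf return; a trivial sketch for reference:

#include <math.h>

static double rem_double_sketch(double dividend, double divisor) {
    return fmod(dividend, divisor);     /* remainder keeps the dividend's sign */
}

static float rem_float_sketch(float dividend, float divisor) {
    return fmodf(dividend, divisor);
}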
diff --git a/runtime/interpreter/mterp/x86/op_rem_int.S b/runtime/interpreter/mterp/x86/op_rem_int.S
deleted file mode 100644
index d25b93c..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv.S" {"result":"rIBASE","special":"$0","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86/op_rem_int_2addr.S
deleted file mode 100644
index c788e0e..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindiv2addr.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86/op_rem_int_lit16.S
deleted file mode 100644
index 3df9d39..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit16.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86/op_rem_int_lit8.S
deleted file mode 100644
index 56e19c6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/bindivLit8.S" {"result":"rIBASE","special":"$0"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_long.S b/runtime/interpreter/mterp/x86/op_rem_long.S
deleted file mode 100644
index 0ffe1f6..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_div_long.S" {"routine":"art_quick_lmod"}
diff --git a/runtime/interpreter/mterp/x86/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86/op_rem_long_2addr.S
deleted file mode 100644
index 4b97735..0000000
--- a/runtime/interpreter/mterp/x86/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_div_long_2addr.S" {"routine":"art_quick_lmod"}
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
deleted file mode 100644
index a8ebbed..0000000
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- vAA
- xorl %ecx, %ecx
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_object.S b/runtime/interpreter/mterp/x86/op_return_object.S
deleted file mode 100644
index 12c84b3..0000000
--- a/runtime/interpreter/mterp/x86/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_return.S"
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
deleted file mode 100644
index d9eddf3..0000000
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ /dev/null
@@ -1,11 +0,0 @@
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
deleted file mode 100644
index 2fbda6b..0000000
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,9 +0,0 @@
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- xorl %eax, %eax
- xorl %ecx, %ecx
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
deleted file mode 100644
index 5fff626..0000000
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movl rSELF, %eax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
- jz 1f
- movl %eax, OUT_ARG0(%esp)
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_rsub_int.S b/runtime/interpreter/mterp/x86/op_rsub_int.S
deleted file mode 100644
index d6449c6..0000000
--- a/runtime/interpreter/mterp/x86/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "x86/binopLit16.S" {"instr":"subl %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S
deleted file mode 100644
index 15d0e35..0000000
--- a/runtime/interpreter/mterp/x86/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"subl %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
deleted file mode 100644
index 66c7b0b..0000000
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32" }
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
- */
- /* op vAA, field@BBBB */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL($helper)
- movl rSELF, %ecx
- RESTORE_IBASE_FROM_SELF %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- .if $is_object
- SET_VREG_OBJECT %eax, rINST # fp[A] <- value
- .else
- SET_VREG %eax, rINST # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sget_boolean.S b/runtime/interpreter/mterp/x86/op_sget_boolean.S
deleted file mode 100644
index 3936eea..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_byte.S b/runtime/interpreter/mterp/x86/op_sget_byte.S
deleted file mode 100644
index 967586d..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_char.S b/runtime/interpreter/mterp/x86/op_sget_char.S
deleted file mode 100644
index b706f18..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_object.S b/runtime/interpreter/mterp/x86/op_sget_object.S
deleted file mode 100644
index eac8836..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_short.S b/runtime/interpreter/mterp/x86/op_sget_short.S
deleted file mode 100644
index ee058a6..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
deleted file mode 100644
index 994cc3a..0000000
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * SGET_WIDE handler wrapper.
- *
- */
- /* sget-wide vAA, field@BBBB */
- .extern MterpSGetU64
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref CCCC
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG1(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG2(%esp) # self
- call SYMBOL(MterpSGetU64)
- movl rSELF, %ecx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
- jnz MterpException
- SET_VREG %eax, rINST # fp[A]<- low part
- SET_VREG_HIGH %edx, rINST # fp[A+1]<- high part
- RESTORE_IBASE_FROM_SELF %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_int.S b/runtime/interpreter/mterp/x86/op_shl_int.S
deleted file mode 100644
index 6a41d1c..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86/op_shl_int_2addr.S
deleted file mode 100644
index 72abb8e..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86/op_shl_int_lit8.S
deleted file mode 100644
index b8d6069..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shl_long.S b/runtime/interpreter/mterp/x86/op_shl_long.S
deleted file mode 100644
index aa58a93..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shl-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shldl %eax,rIBASE
- sall %cl, %eax
- testb $$32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- %eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
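As the block comment above notes, Dalvik masks the shift distance to 6 bits while x86 masks the count in %cl to 5 bits, so the handlers pair shld/sal with an explicit fix-up for counts of 32..63. A C sketch of the same logic (illustrative only, not part of the patch):

#include <stdint.h>

static int64_t shl_long_sketch(int64_t value, uint32_t count) {
    uint32_t lo = (uint32_t)value;
    uint32_t hi = (uint32_t)((uint64_t)value >> 32);
    uint32_t c  = count & 63;                /* Dalvik: low 6 bits of the count */
    uint32_t c5 = c & 31;                    /* what the hardware shift sees    */

    /* shldl %eax, rIBASE ; sall %cl, %eax */
    uint32_t new_hi = (c5 == 0) ? hi : ((hi << c5) | (lo >> (32 - c5)));
    uint32_t new_lo = lo << c5;

    if (c & 32) {                            /* testb $32, %cl fix-up path */
        new_hi = new_lo;
        new_lo = 0;
    }
    return (int64_t)(((uint64_t)new_hi << 32) | new_lo);
}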
diff --git a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
deleted file mode 100644
index 6bbf49c..0000000
--- a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shl-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $$4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shldl %eax, rIBASE
- sall %cl, %eax
- testb $$32, %cl
- je 2f
- movl %eax, rIBASE
- xorl %eax, %eax
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_shr_int.S b/runtime/interpreter/mterp/x86/op_shr_int.S
deleted file mode 100644
index 687b2c3..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86/op_shr_int_2addr.S
deleted file mode 100644
index 533b0e9..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86/op_shr_int_lit8.S
deleted file mode 100644
index ebd1bea..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_shr_long.S b/runtime/interpreter/mterp/x86/op_shr_long.S
deleted file mode 100644
index 68aa0ee..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* shr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $$32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $$31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
deleted file mode 100644
index 148bd1b..0000000
--- a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* shr-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $$4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- sarl %cl, rIBASE
- testb $$32, %cl
- je 2f
- movl rIBASE, %eax
- sarl $$31, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_sparse_switch.S b/runtime/interpreter/mterp/x86/op_sparse_switch.S
deleted file mode 100644
index fdaec47..0000000
--- a/runtime/interpreter/mterp/x86/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
deleted file mode 100644
index e99e7a7..0000000
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default { "helper":"MterpSPutU32"}
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST, rINST
- movl rINST, OUT_ARG1(%esp) # fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL($helper)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sput_boolean.S b/runtime/interpreter/mterp/x86/op_sput_boolean.S
deleted file mode 100644
index c6aa7c4..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_byte.S b/runtime/interpreter/mterp/x86/op_sput_byte.S
deleted file mode 100644
index fd504a8..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_char.S b/runtime/interpreter/mterp/x86/op_sput_char.S
deleted file mode 100644
index b4d0997..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_object.S b/runtime/interpreter/mterp/x86/op_sput_object.S
deleted file mode 100644
index 941b072..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
- EXPORT_PC
- leal OFF_FP_SHADOWFRAME(rFP), %eax
- movl %eax, OUT_ARG0(%esp)
- movl rPC, OUT_ARG1(%esp)
- REFRESH_INST ${opnum}
- movl rINST, OUT_ARG2(%esp)
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp)
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sput_short.S b/runtime/interpreter/mterp/x86/op_sput_short.S
deleted file mode 100644
index eba01bd..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
deleted file mode 100644
index f581507..0000000
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- movzwl 2(rPC), %eax
- movl %eax, OUT_ARG0(%esp) # field ref BBBB
- leal VREG_ADDRESS(rINST), %eax
- movl %eax, OUT_ARG1(%esp) # &fp[AA]
- movl OFF_FP_METHOD(rFP), %eax
- movl %eax, OUT_ARG2(%esp) # referrer
- movl rSELF, %ecx
- movl %ecx, OUT_ARG3(%esp) # self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jnz MterpException
- RESTORE_IBASE
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sub_double.S b/runtime/interpreter/mterp/x86/op_sub_double.S
deleted file mode 100644
index e83afeb..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86/op_sub_double_2addr.S
deleted file mode 100644
index af9a2ab..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_float.S b/runtime/interpreter/mterp/x86/op_sub_float.S
deleted file mode 100644
index 423d834..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86/op_sub_float_2addr.S
deleted file mode 100644
index 18de000..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_int.S b/runtime/interpreter/mterp/x86/op_sub_int.S
deleted file mode 100644
index 7fe03fb..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"subl (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86/op_sub_int_2addr.S
deleted file mode 100644
index cc9bf60..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"subl %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_long.S b/runtime/interpreter/mterp/x86/op_sub_long.S
deleted file mode 100644
index 014591e..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"subl (rFP,%ecx,4), rIBASE", "instr2":"sbbl 4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86/op_sub_long_2addr.S
deleted file mode 100644
index 7498029..0000000
--- a/runtime/interpreter/mterp/x86/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"subl %eax, (rFP,rINST,4)","instr2":"sbbl %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_throw.S b/runtime/interpreter/mterp/x86/op_throw.S
deleted file mode 100644
index a6e6b1e..0000000
--- a/runtime/interpreter/mterp/x86/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINST # eax<- vAA (exception object)
- testl %eax, %eax
- jz common_errNullObject
- movl rSELF,%ecx
- movl %eax, THREAD_EXCEPTION_OFFSET(%ecx)
- jmp MterpException
diff --git a/runtime/interpreter/mterp/x86/op_unused_3e.S b/runtime/interpreter/mterp/x86/op_unused_3e.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_3f.S b/runtime/interpreter/mterp/x86/op_unused_3f.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_40.S b/runtime/interpreter/mterp/x86/op_unused_40.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_41.S b/runtime/interpreter/mterp/x86/op_unused_41.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_42.S b/runtime/interpreter/mterp/x86/op_unused_42.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_43.S b/runtime/interpreter/mterp/x86/op_unused_43.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_79.S b/runtime/interpreter/mterp/x86/op_unused_79.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_7a.S b/runtime/interpreter/mterp/x86/op_unused_7a.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f3.S b/runtime/interpreter/mterp/x86/op_unused_f3.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f4.S b/runtime/interpreter/mterp/x86/op_unused_f4.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f5.S b/runtime/interpreter/mterp/x86/op_unused_f5.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f6.S b/runtime/interpreter/mterp/x86/op_unused_f6.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f7.S b/runtime/interpreter/mterp/x86/op_unused_f7.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f8.S b/runtime/interpreter/mterp/x86/op_unused_f8.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_f9.S b/runtime/interpreter/mterp/x86/op_unused_f9.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_fc.S b/runtime/interpreter/mterp/x86/op_unused_fc.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_unused_fd.S b/runtime/interpreter/mterp/x86/op_unused_fd.S
deleted file mode 100644
index 31d98c1..0000000
--- a/runtime/interpreter/mterp/x86/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/unused.S"
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int.S b/runtime/interpreter/mterp/x86/op_ushr_int.S
deleted file mode 100644
index dfe25ff..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop1.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S
deleted file mode 100644
index c14bc98..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/shop2addr.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S
deleted file mode 100644
index e129f6b..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long.S b/runtime/interpreter/mterp/x86/op_ushr_long.S
deleted file mode 100644
index 9527c9c..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_long.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Long integer shift. This is different from the generic 32/64-bit
- * binary operations because vAA/vBB are 64-bit but vCC (the shift
- * distance) is 32-bit. Also, Dalvik requires us to mask off the low
- * 6 bits of the shift distance. x86 shifts automatically mask off
- * the low 5 bits of %cl, so have to handle the 64 > shiftcount > 31
- * case specially.
- */
- /* ushr-long vAA, vBB, vCC */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl 2(rPC), %eax # eax <- BB
- movzbl 3(rPC), %ecx # ecx <- CC
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx, %ecx # ecx <- vCC
- GET_VREG %eax, %eax # eax <- v[BB+0]
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $$32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
deleted file mode 100644
index 72fcc36..0000000
--- a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
- * 32-bit shift distance.
- */
- /* ushr-long/2addr vA, vB */
- /* ecx gets shift count */
- /* Need to spill rIBASE */
- /* rINSTw gets AA */
- movzbl rINSTbl, %ecx # ecx <- BA
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- v[AA+0]
- sarl $$4, %ecx # ecx <- B
- movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx, %ecx # ecx <- vBB
- shrdl rIBASE, %eax
- shrl %cl, rIBASE
- testb $$32, %cl
- je 2f
- movl rIBASE, %eax
- xorl rIBASE, rIBASE
-2:
- SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
- movl LOCAL0(%esp), rIBASE
- SET_VREG %eax, rINST # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_xor_int.S b/runtime/interpreter/mterp/x86/op_xor_int.S
deleted file mode 100644
index 35aca6a..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop.S" {"instr":"xorl (rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86/op_xor_int_2addr.S
deleted file mode 100644
index d7b70e2..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binop2addr.S" {"instr":"xorl %eax, (rFP,%ecx,4)"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86/op_xor_int_lit16.S
deleted file mode 100644
index 115f0a0..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit16.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86/op_xor_int_lit8.S
deleted file mode 100644
index 243971c..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopLit8.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_long.S b/runtime/interpreter/mterp/x86/op_xor_long.S
deleted file mode 100644
index 0d3c0f5..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide.S" {"instr1":"xorl (rFP,%ecx,4), rIBASE", "instr2":"xorl 4(rFP,%ecx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86/op_xor_long_2addr.S
deleted file mode 100644
index b5000e4..0000000
--- a/runtime/interpreter/mterp/x86/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86/binopWide2addr.S" {"instr1":"xorl %eax, (rFP,rINST,4)","instr2":"xorl %ecx, 4(rFP,rINST,4)"}
diff --git a/runtime/interpreter/mterp/x86/other.S b/runtime/interpreter/mterp/x86/other.S
new file mode 100644
index 0000000..5de3381
--- /dev/null
+++ b/runtime/interpreter/mterp/x86/other.S
@@ -0,0 +1,318 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp)
+ call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+%def op_const():
+ /* const vAA, #+BBBBbbbb */
+ movl 2(rPC), %eax # grab all 32 bits at once
+ SET_VREG %eax, rINST # vAA<- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_16():
+ /* const/16 vAA, #+BBBB */
+ movswl 2(rPC), %ecx # ecx <- ssssBBBB
+ SET_VREG %ecx, rINST # vAA <- ssssBBBB
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_4():
+ /* const/4 vA, #+B */
+ movsx rINSTbl, %eax # eax <-ssssssBx
+ movl $$0xf, rINST
+ andl %eax, rINST # rINST <- A
+ sarl $$4, %eax
+ SET_VREG %eax, rINST
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, #+BBBB0000 */
+ movzwl 2(rPC), %eax # eax <- 0000BBBB
+ sall $$16, %eax # eax <- BBBB0000
+ SET_VREG %eax, rINST # vAA <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), %eax # eax <- BBBB
+ movl %eax, OUT_ARG0(%esp)
+ movl rINST, OUT_ARG1(%esp)
+ leal OFF_FP_SHADOWFRAME(rFP), %eax
+ movl %eax, OUT_ARG2(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG3(%esp)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide():
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ movl 2(rPC), %eax # eax <- lsw
+ movzbl rINSTbl, %ecx # ecx <- AA
+ movl 6(rPC), rINST # rINST <- msw
+ SET_VREG %eax, %ecx
+ SET_VREG_HIGH rINST, %ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, #+BBBB */
+ movswl 2(rPC), %eax # eax <- ssssBBBB
+ movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
+ cltd # rIBASE:eax <- ssssssssssssBBBB
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
+ movl %ecx, rIBASE # restore rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ movl 2(rPC), %eax # eax <- BBBBbbbb
+ movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
+ cltd # rIBASE:eax <- ssssssssBBBBbbbb
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
+ movl %ecx, rIBASE # restore rIBASE
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ movzwl 2(rPC), %eax # eax <- 0000BBBB
+ sall $$16, %eax # eax <- BBBB0000
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
+ xorl %eax, %eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_monitor_enter():
+/*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ GET_VREG %ecx, rINST
+ movl %ecx, OUT_ARG0(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG1(%esp)
+ call SYMBOL(artLockObjectFromCode) # (object, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_monitor_exit():
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ GET_VREG %ecx, rINST
+ movl %ecx, OUT_ARG0(%esp)
+ movl rSELF, %eax
+ movl %eax, OUT_ARG1(%esp)
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
+ RESTORE_IBASE
+ testb %al, %al
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movzbl rINSTbl, %eax # eax <- BA
+ andb $$0xf, %al # eax <- A
+ shrl $$4, rINST # rINST <- B
+ GET_VREG rINST, rINST
+ .if $is_object
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
+ .else
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwl 4(rPC), %ecx # ecx <- BBBB
+ movzwl 2(rPC), %eax # eax <- AAAA
+ GET_VREG rINST, %ecx
+ .if $is_object
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
+ .else
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_exception():
+ /* move-exception vAA */
+ movl rSELF, %ecx
+ movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
+ SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
+ movl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzx rINSTbl, %eax # eax <- AA
+ movw 2(rPC), rINSTw # rINSTw <- BBBB
+ GET_VREG rINST, rINST # rINST <- fp[BBBB]
+ .if $is_object
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
+ .else
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
+    movl    (%eax), %eax                    # eax <- result.i.
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINST             # fp[AA] <- result
+    .else
+    SET_VREG %eax, rINST                    # fp[AA] <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* move-result-wide vAA */
+ movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
+ movl 4(%eax), %ecx # Get high
+ movl (%eax), %eax # Get low
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzbl rINSTbl, %ecx # ecx <- BA
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 4(rPC), %ecx # ecx<- BBBB
+ movzwl 2(rPC), %eax # eax<- AAAA
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 2(rPC), %ecx # ecx <- BBBB
+ movzbl rINSTbl, %eax # eax <- AAAA
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_nop():
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
diff --git a/runtime/interpreter/mterp/x86/shop2addr.S b/runtime/interpreter/mterp/x86/shop2addr.S
deleted file mode 100644
index 96c9954..0000000
--- a/runtime/interpreter/mterp/x86/shop2addr.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movzx rINSTbl, %ecx # eax <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %ecx # eax <- vBB
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax, rINST # eax <- vAA
- $instr # ex: sarl %cl, %eax
- SET_VREG $result, rINST
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/sseBinop.S b/runtime/interpreter/mterp/x86/sseBinop.S
deleted file mode 100644
index 63a1e21..0000000
--- a/runtime/interpreter/mterp/x86/sseBinop.S
+++ /dev/null
@@ -1,9 +0,0 @@
-%default {"instr":"","suff":""}
- movzbl 2(rPC), %ecx # ecx <- BB
- movzbl 3(rPC), %eax # eax <- CC
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- ${instr}${suff} VREG_ADDRESS(%eax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/sseBinop2Addr.S b/runtime/interpreter/mterp/x86/sseBinop2Addr.S
deleted file mode 100644
index d157e67..0000000
--- a/runtime/interpreter/mterp/x86/sseBinop2Addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
-%default {"instr":"","suff":""}
- movzx rINSTbl, %ecx # ecx <- A+
- andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
- sarl $$4, rINST # rINST<- B
- ${instr}${suff} VREG_ADDRESS(rINST), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unop.S b/runtime/interpreter/mterp/x86/unop.S
deleted file mode 100644
index db09fc0..0000000
--- a/runtime/interpreter/mterp/x86/unop.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default {"instr":""}
-/*
- * Generic 32-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movzbl rINSTbl,%ecx # ecx <- A+
- sarl $$4,rINST # rINST <- B
- GET_VREG %eax, rINST # eax <- vB
- andb $$0xf,%cl # ecx <- A
- $instr
- SET_VREG %eax, %ecx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unused.S b/runtime/interpreter/mterp/x86/unused.S
deleted file mode 100644
index c95ef94..0000000
--- a/runtime/interpreter/mterp/x86/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
diff --git a/runtime/interpreter/mterp/x86/zcmp.S b/runtime/interpreter/mterp/x86/zcmp.S
deleted file mode 100644
index c116159..0000000
--- a/runtime/interpreter/mterp/x86/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $$0, VREG_ADDRESS(rINST) # compare (vA, 0)
- j${revcmp} 1f
- movswl 2(rPC), rINST # fetch signed displacement
- testl rINST, rINST
- jmp MterpCommonTakenBranch
-1:
- cmpw $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/alt_stub.S b/runtime/interpreter/mterp/x86_64/alt_stub.S
deleted file mode 100644
index 24cd1a8..0000000
--- a/runtime/interpreter/mterp/x86_64/alt_stub.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
- * any interesting requests and then jump to the real instruction
- * handler. Unlike the Arm handler, we can't do this as a tail call
- * because rIBASE is caller save and we need to reload it.
- *
- * Note that unlike in the Arm implementation, we should never arrive
- * here with a zero breakFlag because we always refresh rIBASE on
- * return.
- */
- .extern MterpCheckBefore
- REFRESH_IBASE
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- call SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
- jmp .L_op_nop+(${opnum}*${handler_size_bytes})
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
new file mode 100644
index 0000000..263f82b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -0,0 +1,575 @@
+%def bindiv(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
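+ * A divisor of -1 is special-cased off the main path: idiv would raise a
+ * divide-error for INT_MIN / -1, so div negates the dividend and rem
+ * yields zero instead.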
+ */
+ /* div/rem vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ .if $wide
+ GET_WIDE_VREG %rax, %rax # eax <- vBB
+ GET_WIDE_VREG $second, %rcx # ecx <- vCC
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ GET_VREG $second, %rcx # ecx <- vCC
+ .endif
+ test${suffix} $second, $second
+ jz common_errDivideByZero
+ cmp${suffix} $$-1, $second
+ je 2f
+ $ext # rdx:rax <- sign-extended of rax
+ idiv${suffix} $second
+1:
+ .if $wide
+    SET_WIDE_VREG $result, rINSTq           # vAA <- result
+    .else
+    SET_VREG $result, rINSTq                # vAA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xor${suffix} $result, $result
+ .else
+ neg${suffix} $result
+ .endif
+ jmp 1b
+
+%def bindiv2addr(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/2addr vA, vB */
+ movl rINST, %ecx # rcx <- BA
+ sarl $$4, %ecx # rcx <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ .if $wide
+ GET_WIDE_VREG %rax, rINSTq # eax <- vA
+ GET_WIDE_VREG $second, %rcx # ecx <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vA
+ GET_VREG $second, %rcx # ecx <- vB
+ .endif
+ test${suffix} $second, $second
+ jz common_errDivideByZero
+ cmp${suffix} $$-1, $second
+ je 2f
+ $ext # rdx:rax <- sign-extended of rax
+ idiv${suffix} $second
+1:
+ .if $wide
+ SET_WIDE_VREG $result, rINSTq # vA <- result
+ .else
+ SET_VREG $result, rINSTq # vA <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+2:
+ .if $rem
+ xor${suffix} $result, $result
+ .else
+ neg${suffix} $result
+ .endif
+ jmp 1b
+
+%def bindivLit16(result="", rem="0"):
+/*
+ * 32-bit binary div/rem operation. Handles special case of op1=-1.
+ */
+ /* div/rem/lit16 vA, vB, #+CCCC */
+ /* Need A in rINST, ssssCCCC in ecx, vB in eax */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ andb $$0xf, rINSTbl # rINST <- A
+ testl %ecx, %ecx
+ jz common_errDivideByZero
+ cmpl $$-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG $result, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xorl $result, $result
+ .else
+ negl $result
+ .endif
+ jmp 1b
+
+%def bindivLit8(result="", rem="0"):
+/*
+ * 32-bit div/rem "lit8" binary operation. Handles special case of
+ * op0=minint & op1=-1
+ */
+ /* div/rem/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movsbl 3(rPC), %ecx # ecx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ testl %ecx, %ecx
+ je common_errDivideByZero
+ cmpl $$-1, %ecx
+ je 2f
+ cdq # rax <- sign-extended of eax
+ idivl %ecx
+1:
+ SET_VREG $result, rINSTq # vA <- result
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+ .if $rem
+ xorl $result, $result
+ .else
+ negl $result
+ .endif
+ jmp 1b
+
+%def binop(result="%eax", instr=""):
+/*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int, sub-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB
+ $instr # ex: addl VREG_ADDRESS(%rcx),%eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop1(wide="0", instr=""):
+/*
+ * Generic 32-bit binary operation in which both operands loaded to
+ * registers (op0 in eax, op1 in ecx).
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+    GET_VREG %ecx, %rcx                     # ecx <- vCC
+ .if $wide
+ GET_WIDE_VREG %rax, %rax # rax <- vBB
+ $instr # ex: addl %ecx,%eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, %rax # eax <- vBB
+ $instr # ex: addl %ecx,%eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binop2addr(result="%eax", instr=""):
+/*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an instruction or a function call.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_VREG %eax, rINSTq # eax <- vB
+ $instr # for ex: addl %eax,(rFP,%ecx,4)
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def binopLit16(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than eax, you can override "result".)
+ *
+ * For: add-int/lit16, rsub-int,
+ * and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ movl rINST, %eax # rax <- 000000BA
+ sarl $$4, %eax # eax <- B
+ GET_VREG %eax, %rax # eax <- vB
+ andb $$0xf, rINSTbl # rINST <- A
+ movswl 2(rPC), %ecx # ecx <- ssssCCCC
+ $instr # for example: addl %ecx, %eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopLit8(result="%eax", instr=""):
+/*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = eax op ecx".
+ * This could be an x86 instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * For: add-int/lit8, rsub-int/lit8
+ * and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movsbl 3(rPC), %ecx # rcx <- ssssssCC
+ GET_VREG %eax, %rax # eax <- rBB
+ $instr # ex: addl %ecx,%eax
+ SET_VREG $result, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide(instr=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rax, %rax # rax <- v[BB]
+ $instr # ex: addq VREG_ADDRESS(%rcx),%rax
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def binopWide2addr(instr=""):
+/*
+ * Generic 64-bit binary operation.
+ */
+ /* binop/2addr vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ $instr # for ex: addq %rax,VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def cvtfp_int(fp_suffix="", i_suffix="", max_const="", result_reg="", wide=""):
+/* On fp to int conversions, Java requires that
+ * if the result > maxint, it should be clamped to maxint. If it is less
+ * than minint, it should be clamped to minint. If it is a nan, the result
+ * should be zero. Further, the rounding mode is to truncate.
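+ *
+ * Here result_reg is preloaded with max_const: if the source compares
+ * >= max_const (jae) that clamp is kept, if the compare is unordered
+ * (jp, NaN) the result is zeroed, otherwise cvtt truncates toward zero.
+ * Values below minint truncate to the x86 "integer indefinite" pattern,
+ * which is already minint.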
+ */
+ /* float/double to int/long vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ mov${i_suffix} ${max_const}, ${result_reg}
+ cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
+ comis${fp_suffix} %xmm1, %xmm0
+ jae 1f
+ jp 2f
+ cvtts${fp_suffix}2si${i_suffix} %xmm0, ${result_reg}
+ jmp 1f
+2:
+ xor${i_suffix} ${result_reg}, ${result_reg}
+1:
+ .if $wide
+ SET_WIDE_VREG ${result_reg}, %rcx
+ .else
+ SET_VREG ${result_reg}, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def shop2addr(wide="0", instr=""):
+/*
+ * Generic 32-bit "shift/2addr" operation.
+ */
+ /* shift/2addr vA, vB */
+ movl rINST, %ecx # ecx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # ecx <- vBB
+ andb $$0xf, rINSTbl # rINST <- A
+ .if $wide
+ GET_WIDE_VREG %rax, rINSTq # rax <- vAA
+ $instr # ex: sarl %cl, %eax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ GET_VREG %eax, rINSTq # eax <- vAA
+ $instr # ex: sarl %cl, %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def unop(preinstr="", instr="", wide="0"):
+/*
+ * Generic 32/64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op eax".
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4,rINST # rINST <- B
+ .if ${wide}
+ GET_WIDE_VREG %rax, rINSTq # rax <- vB
+ .else
+ GET_VREG %eax, rINSTq # eax <- vB
+ .endif
+ andb $$0xf,%cl # ecx <- A
+$preinstr
+$instr
+ .if ${wide}
+ SET_WIDE_VREG %rax, %rcx
+ .else
+ SET_VREG %eax, %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_int():
+% binop(instr="addl VREG_ADDRESS(%rcx), %eax")
+
+%def op_add_int_2addr():
+% binop2addr(instr="addl %eax, VREG_ADDRESS(%rcx)")
+
+%def op_add_int_lit16():
+% binopLit16(instr="addl %ecx, %eax")
+
+%def op_add_int_lit8():
+% binopLit8(instr="addl %ecx, %eax")
+
+%def op_add_long():
+% binopWide(instr="addq VREG_ADDRESS(%rcx), %rax")
+
+%def op_add_long_2addr():
+% binopWide2addr(instr="addq %rax, VREG_ADDRESS(%rcx)")
+
+%def op_and_int():
+% binop(instr="andl VREG_ADDRESS(%rcx), %eax")
+
+%def op_and_int_2addr():
+% binop2addr(instr="andl %eax, VREG_ADDRESS(%rcx)")
+
+%def op_and_int_lit16():
+% binopLit16(instr="andl %ecx, %eax")
+
+%def op_and_int_lit8():
+% binopLit8(instr="andl %ecx, %eax")
+
+%def op_and_long():
+% binopWide(instr="andq VREG_ADDRESS(%rcx), %rax")
+
+%def op_and_long_2addr():
+% binopWide2addr(instr="andq %rax, VREG_ADDRESS(%rcx)")
+
+%def op_cmp_long():
+/*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
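+ *
+ * Branchless selection: %edi starts at 0 (equal), %eax is set to 1 and
+ * %esi to -1; cmovl/cmovg pick the final value after the cmpq.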
+ */
+ /* cmp-long vAA, vBB, vCC */
+ movzbq 2(rPC), %rdx # edx <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
+ xorl %eax, %eax
+ xorl %edi, %edi
+ addb $$1, %al
+ movl $$-1, %esi
+ cmpq VREG_ADDRESS(%rcx), %rdx
+ cmovl %esi, %edi
+ cmovg %eax, %edi
+ SET_VREG %edi, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_div_int():
+% bindiv(result="%eax", second="%ecx", wide="0", suffix="l")
+
+%def op_div_int_2addr():
+% bindiv2addr(result="%eax", second="%ecx", wide="0", suffix="l")
+
+%def op_div_int_lit16():
+% bindivLit16(result="%eax")
+
+%def op_div_int_lit8():
+% bindivLit8(result="%eax")
+
+%def op_div_long():
+% bindiv(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+
+%def op_div_long_2addr():
+% bindiv2addr(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+
+%def op_int_to_byte():
+% unop(instr="movsbl %al, %eax")
+
+%def op_int_to_char():
+% unop(instr="movzwl %ax,%eax")
+
+%def op_int_to_long():
+ /* int to long vA, vB */
+    movzbq  rINSTbl, %rax                   # rax <- BA
+ sarl $$4, %eax # eax <- B
+ andb $$0xf, rINSTbl # rINST <- A
+ movslq VREG_ADDRESS(%rax), %rax
+ SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+
+%def op_int_to_short():
+% unop(instr="movswl %ax, %eax")
+
+%def op_long_to_int():
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+% op_move()
+
+%def op_mul_int():
+% binop(instr="imull VREG_ADDRESS(%rcx), %eax")
+
+%def op_mul_int_2addr():
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ imull (rFP,rINSTq,4), %eax
+ SET_VREG %eax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_mul_int_lit16():
+% binopLit16(instr="imull %ecx, %eax")
+
+%def op_mul_int_lit8():
+% binopLit8(instr="imull %ecx, %eax")
+
+%def op_mul_long():
+% binopWide(instr="imulq VREG_ADDRESS(%rcx), %rax")
+
+%def op_mul_long_2addr():
+ /* mul vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rax, %rcx # rax <- vA
+ imulq (rFP,rINSTq,4), %rax
+ SET_WIDE_VREG %rax, %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_neg_int():
+% unop(instr=" negl %eax")
+
+%def op_neg_long():
+% unop(instr=" negq %rax", wide="1")
+
+%def op_not_int():
+% unop(instr=" notl %eax")
+
+%def op_not_long():
+% unop(instr=" notq %rax", wide="1")
+
+%def op_or_int():
+% binop(instr="orl VREG_ADDRESS(%rcx), %eax")
+
+%def op_or_int_2addr():
+% binop2addr(instr="orl %eax, VREG_ADDRESS(%rcx)")
+
+%def op_or_int_lit16():
+% binopLit16(instr="orl %ecx, %eax")
+
+%def op_or_int_lit8():
+% binopLit8(instr="orl %ecx, %eax")
+
+%def op_or_long():
+% binopWide(instr="orq VREG_ADDRESS(%rcx), %rax")
+
+%def op_or_long_2addr():
+% binopWide2addr(instr="orq %rax, VREG_ADDRESS(%rcx)")
+
+%def op_rem_int():
+% bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+
+%def op_rem_int_2addr():
+% bindiv2addr(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+
+%def op_rem_int_lit16():
+% bindivLit16(result="%edx", rem="1")
+
+%def op_rem_int_lit8():
+% bindivLit8(result="%edx", rem="1")
+
+%def op_rem_long():
+% bindiv(result="%rdx", second="%rcx", wide="1", suffix="q", ext="cqo", rem="1")
+
+%def op_rem_long_2addr():
+% bindiv2addr(result="%rdx", second="%rcx", wide="1", suffix="q", rem="1", ext="cqo")
+
+%def op_rsub_int():
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+% binopLit16(instr="subl %eax, %ecx", result="%ecx")
+
+%def op_rsub_int_lit8():
+% binopLit8(instr="subl %eax, %ecx", result="%ecx")
+
+%def op_shl_int():
+% binop1(instr="sall %cl, %eax")
+
+%def op_shl_int_2addr():
+% shop2addr(instr="sall %cl, %eax")
+
+%def op_shl_int_lit8():
+% binopLit8(instr="sall %cl, %eax")
+
+%def op_shl_long():
+% binop1(instr="salq %cl, %rax", wide="1")
+
+%def op_shl_long_2addr():
+% shop2addr(instr="salq %cl, %rax", wide="1")
+
+%def op_shr_int():
+% binop1(instr="sarl %cl, %eax")
+
+%def op_shr_int_2addr():
+% shop2addr(instr="sarl %cl, %eax")
+
+%def op_shr_int_lit8():
+% binopLit8(instr="sarl %cl, %eax")
+
+%def op_shr_long():
+% binop1(instr="sarq %cl, %rax", wide="1")
+
+%def op_shr_long_2addr():
+% shop2addr(instr="sarq %cl, %rax", wide="1")
+
+%def op_sub_int():
+% binop(instr="subl VREG_ADDRESS(%rcx), %eax")
+
+%def op_sub_int_2addr():
+% binop2addr(instr="subl %eax, VREG_ADDRESS(%rcx)")
+
+%def op_sub_long():
+% binopWide(instr="subq VREG_ADDRESS(%rcx), %rax")
+
+%def op_sub_long_2addr():
+% binopWide2addr(instr="subq %rax, VREG_ADDRESS(%rcx)")
+
+%def op_ushr_int():
+% binop1(instr="shrl %cl, %eax")
+
+%def op_ushr_int_2addr():
+% shop2addr(instr="shrl %cl, %eax")
+
+%def op_ushr_int_lit8():
+% binopLit8(instr="shrl %cl, %eax")
+
+%def op_ushr_long():
+% binop1(instr="shrq %cl, %rax", wide="1")
+
+%def op_ushr_long_2addr():
+% shop2addr(instr="shrq %cl, %rax", wide="1")
+
+%def op_xor_int():
+% binop(instr="xorl VREG_ADDRESS(%rcx), %eax")
+
+%def op_xor_int_2addr():
+% binop2addr(instr="xorl %eax, VREG_ADDRESS(%rcx)")
+
+%def op_xor_int_lit16():
+% binopLit16(instr="xorl %ecx, %eax")
+
+%def op_xor_int_lit8():
+% binopLit8(instr="xorl %ecx, %eax")
+
+%def op_xor_long():
+% binopWide(instr="xorq VREG_ADDRESS(%rcx), %rax")
+
+%def op_xor_long_2addr():
+% binopWide2addr(instr="xorq %rax, VREG_ADDRESS(%rcx)")
diff --git a/runtime/interpreter/mterp/x86_64/array.S b/runtime/interpreter/mterp/x86_64/array.S
new file mode 100644
index 0000000..e49c097
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/array.S
@@ -0,0 +1,178 @@
+%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
+/*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
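+ * (the wide="1" variant used by aget-wide loads 64 bits with movq at scale 8)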
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # eax <- BB
+ movzbq 3(rPC), %rcx # ecx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if $wide
+ movq $data_offset(%rax,%rcx,8), %rax
+ SET_WIDE_VREG %rax, rINSTq
+ .else
+ $load $data_offset(%rax,%rcx,$shift), %eax
+ SET_VREG %eax, rINSTq
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_boolean():
+% op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aget_byte():
+% op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aget_char():
+% op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aget_object():
+/*
+ * Array object get. vAA <- vBB[vCC].
+ *
+ * for: aget-object
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+    GET_VREG OUT_32_ARG0, %rax              # OUT_32_ARG0 <- vBB (array object)
+    GET_VREG OUT_32_ARG1, %rcx              # OUT_32_ARG1 <- vCC (requested index)
+ EXPORT_PC
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
+ movq rSELF, %rcx
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+ jnz MterpException
+ SET_VREG_OBJECT %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aget_short():
+% op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aget_wide():
+% op_aget(load="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
+
+%def op_aput(reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
+/*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 2(rPC), %rax # rax <- BB
+ movzbq 3(rPC), %rcx # rcx <- CC
+ GET_VREG %eax, %rax # eax <- vBB (array object)
+ GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
+ testl %eax, %eax # null array object?
+ je common_errNullObject # bail if so
+ cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
+ jae common_errArrayIndex # index >= length, bail.
+ .if $wide
+ GET_WIDE_VREG rINSTq, rINSTq
+ .else
+ GET_VREG rINST, rINSTq
+ .endif
+ $store $reg, $data_offset(%rax,%rcx,$shift)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_boolean():
+% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
+
+%def op_aput_byte():
+% op_aput(reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
+
+%def op_aput_char():
+% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
+
+%def op_aput_object():
+/*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+    call    SYMBOL(MterpAputObject)         # (shadow_frame, dex_pc_ptr, inst_data)
+ testb %al, %al
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_short():
+% op_aput(reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
+
+%def op_aput_wide():
+% op_aput(reg="rINSTq", store="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
+
+%def op_array_length():
+/*
+ * Return the length of an array.
+ */
+ movl rINST, %eax # eax <- BA
+ sarl $$4, rINST # rINST <- B
+ GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
+ testl %ecx, %ecx # is null?
+ je common_errNullObject
+ andb $$0xf, %al # eax <- A
+ movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+ SET_VREG rINST, %rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_fill_array_data():
+ /* fill-array-data vAA, +BBBBBBBB */
+ EXPORT_PC
+ movslq 2(rPC), %rcx # rcx <- ssssssssBBBBbbbb
+ leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
+ GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array(helper="MterpFilledNewArray"):
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ .extern $helper
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ movq rSELF, OUT_ARG2
+ call SYMBOL($helper)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array_range():
+% op_filled_new_array(helper="MterpFilledNewArrayRange")
+
+%def op_new_array():
+/*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpNewArray)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S
deleted file mode 100644
index 6601483..0000000
--- a/runtime/interpreter/mterp/x86_64/bincmp.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Generic two-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
- */
- /* if-cmp vA, vB, +CCCC */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # rcx <- A
- GET_VREG %eax, %rcx # eax <- vA
- cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
- j${revcmp} 1f
- movswq 2(rPC), rINSTq # Get signed branch offset
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/bindiv.S b/runtime/interpreter/mterp/x86_64/bindiv.S
deleted file mode 100644
index e10d1dc..0000000
--- a/runtime/interpreter/mterp/x86_64/bindiv.S
+++ /dev/null
@@ -1,34 +0,0 @@
-%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- .if $wide
- GET_WIDE_VREG %rax, %rax # eax <- vBB
- GET_WIDE_VREG $second, %rcx # ecx <- vCC
- .else
- GET_VREG %eax, %rax # eax <- vBB
- GET_VREG $second, %rcx # ecx <- vCC
- .endif
- test${suffix} $second, $second
- jz common_errDivideByZero
- cmp${suffix} $$-1, $second
- je 2f
- $ext # rdx:rax <- sign-extended of rax
- idiv${suffix} $second
-1:
- .if $wide
- SET_WIDE_VREG $result, rINSTq # eax <- vBB
- .else
- SET_VREG $result, rINSTq # eax <- vBB
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if $rem
- xor${suffix} $result, $result
- .else
- neg${suffix} $result
- .endif
- jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindiv2addr.S b/runtime/interpreter/mterp/x86_64/bindiv2addr.S
deleted file mode 100644
index 8b9bc95..0000000
--- a/runtime/interpreter/mterp/x86_64/bindiv2addr.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"result":"","second":"","wide":"","suffix":"","rem":"0","ext":"cdq"}
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/2addr vA, vB */
- movl rINST, %ecx # rcx <- BA
- sarl $$4, %ecx # rcx <- B
- andb $$0xf, rINSTbl # rINST <- A
- .if $wide
- GET_WIDE_VREG %rax, rINSTq # eax <- vA
- GET_WIDE_VREG $second, %rcx # ecx <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vA
- GET_VREG $second, %rcx # ecx <- vB
- .endif
- test${suffix} $second, $second
- jz common_errDivideByZero
- cmp${suffix} $$-1, $second
- je 2f
- $ext # rdx:rax <- sign-extended of rax
- idiv${suffix} $second
-1:
- .if $wide
- SET_WIDE_VREG $result, rINSTq # vA <- result
- .else
- SET_VREG $result, rINSTq # vA <- result
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-2:
- .if $rem
- xor${suffix} $result, $result
- .else
- neg${suffix} $result
- .endif
- jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit16.S b/runtime/interpreter/mterp/x86_64/bindivLit16.S
deleted file mode 100644
index 80dbce2..0000000
--- a/runtime/interpreter/mterp/x86_64/bindivLit16.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {"result":"","rem":"0"}
-/*
- * 32-bit binary div/rem operation. Handles special case of op1=-1.
- */
- /* div/rem/lit16 vA, vB, #+CCCC */
- /* Need A in rINST, ssssCCCC in ecx, vB in eax */
- movl rINST, %eax # rax <- 000000BA
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- andb $$0xf, rINSTbl # rINST <- A
- testl %ecx, %ecx
- jz common_errDivideByZero
- cmpl $$-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG $result, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if $rem
- xorl $result, $result
- .else
- negl $result
- .endif
- jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/bindivLit8.S b/runtime/interpreter/mterp/x86_64/bindivLit8.S
deleted file mode 100644
index ab535f3..0000000
--- a/runtime/interpreter/mterp/x86_64/bindivLit8.S
+++ /dev/null
@@ -1,25 +0,0 @@
-%default {"result":"","rem":"0"}
-/*
- * 32-bit div/rem "lit8" binary operation. Handles special case of
- * op0=minint & op1=-1
- */
- /* div/rem/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # eax <- BB
- movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- testl %ecx, %ecx
- je common_errDivideByZero
- cmpl $$-1, %ecx
- je 2f
- cdq # rax <- sign-extended of eax
- idivl %ecx
-1:
- SET_VREG $result, rINSTq # vA <- result
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-2:
- .if $rem
- xorl $result, $result
- .else
- negl $result
- .endif
- jmp 1b
diff --git a/runtime/interpreter/mterp/x86_64/binop.S b/runtime/interpreter/mterp/x86_64/binop.S
deleted file mode 100644
index 962dd61..0000000
--- a/runtime/interpreter/mterp/x86_64/binop.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit binary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = eax op (rFP,%ecx,4)".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int, sub-int, and-int, or-int,
- * xor-int, shl-int, shr-int, ushr-int
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB
- $instr # ex: addl (rFP,%rcx,4),%eax
- SET_VREG $result, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop1.S b/runtime/interpreter/mterp/x86_64/binop1.S
deleted file mode 100644
index bdd5732..0000000
--- a/runtime/interpreter/mterp/x86_64/binop1.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"0"}
-/*
- * Generic 32-bit binary operation in which both operands loaded to
- * registers (op0 in eax, op1 in ecx).
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %ecx, %rcx # eax <- vCC
- .if $wide
- GET_WIDE_VREG %rax, %rax # rax <- vBB
- $instr # ex: addl %ecx,%eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, %rax # eax <- vBB
- $instr # ex: addl %ecx,%eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binop2addr.S b/runtime/interpreter/mterp/x86_64/binop2addr.S
deleted file mode 100644
index 4448a81..0000000
--- a/runtime/interpreter/mterp/x86_64/binop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = r0 op r1".
- * This could be an instruction or a function call.
- *
- * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
- * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
- * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
- * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_VREG %eax, rINSTq # eax <- vB
- $instr # for ex: addl %eax,(rFP,%ecx,4)
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/binopLit16.S b/runtime/interpreter/mterp/x86_64/binopLit16.S
deleted file mode 100644
index de43b53..0000000
--- a/runtime/interpreter/mterp/x86_64/binopLit16.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit16" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than eax, you can override "result".)
- *
- * For: add-int/lit16, rsub-int,
- * and-int/lit16, or-int/lit16, xor-int/lit16
- */
- /* binop/lit16 vA, vB, #+CCCC */
- movl rINST, %eax # rax <- 000000BA
- sarl $$4, %eax # eax <- B
- GET_VREG %eax, %rax # eax <- vB
- andb $$0xf, rINSTbl # rINST <- A
- movswl 2(rPC), %ecx # ecx <- ssssCCCC
- $instr # for example: addl %ecx, %eax
- SET_VREG $result, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopLit8.S b/runtime/interpreter/mterp/x86_64/binopLit8.S
deleted file mode 100644
index 995002b..0000000
--- a/runtime/interpreter/mterp/x86_64/binopLit8.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default {"result":"%eax"}
-/*
- * Generic 32-bit "lit8" binary operation. Provide an "instr" line
- * that specifies an instruction that performs "result = eax op ecx".
- * This could be an x86 instruction or a function call. (If the result
- * comes back in a register other than r0, you can override "result".)
- *
- * For: add-int/lit8, rsub-int/lit8
- * and-int/lit8, or-int/lit8, xor-int/lit8,
- * shl-int/lit8, shr-int/lit8, ushr-int/lit8
- */
- /* binop/lit8 vAA, vBB, #+CC */
- movzbq 2(rPC), %rax # rax <- BB
- movsbl 3(rPC), %ecx # rcx <- ssssssCC
- GET_VREG %eax, %rax # eax <- rBB
- $instr # ex: addl %ecx,%eax
- SET_VREG $result, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide.S b/runtime/interpreter/mterp/x86_64/binopWide.S
deleted file mode 100644
index f92f18e..0000000
--- a/runtime/interpreter/mterp/x86_64/binopWide.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
- /* binop vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- $instr # ex: addq (rFP,%rcx,4),%rax
- SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/binopWide2addr.S b/runtime/interpreter/mterp/x86_64/binopWide2addr.S
deleted file mode 100644
index d9e6cfb..0000000
--- a/runtime/interpreter/mterp/x86_64/binopWide2addr.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Generic 64-bit binary operation.
- */
- /* binop/2addr vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- $instr # for ex: addq %rax,(rFP,%rcx,4)
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/const.S b/runtime/interpreter/mterp/x86_64/const.S
deleted file mode 100644
index 1ddf20f..0000000
--- a/runtime/interpreter/mterp/x86_64/const.S
+++ /dev/null
@@ -1,15 +0,0 @@
-%default { "helper":"UndefinedConstHandler" }
- /* const/class vAA, type@BBBB */
- /* const/method-handle vAA, method_handle@BBBB */
- /* const/method-type vAA, proto@BBBB */
- /* const/string vAA, string@@BBBB */
- .extern $helper
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # eax <- OUT_ARG0
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/control_flow.S b/runtime/interpreter/mterp/x86_64/control_flow.S
new file mode 100644
index 0000000..2f3b5e5
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/control_flow.S
@@ -0,0 +1,206 @@
+%def bincmp(revcmp=""):
+/*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
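+ *
+ * The not-taken path still compares rPROFILE against JIT_CHECK_OSR so the
+ * JIT can trigger on-stack replacement at this branch.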
+ */
+ /* if-cmp vA, vB, +CCCC */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # rcx <- A
+ GET_VREG %eax, %rcx # eax <- vA
+ cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB)
+ j${revcmp} 1f
+ movswq 2(rPC), rINSTq # Get signed branch offset
+ testq rINSTq, rINSTq
+ jmp MterpCommonTakenBranch
+1:
+ cmpl $$JIT_CHECK_OSR, rPROFILE
+ je .L_check_not_taken_osr
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def zcmp(revcmp=""):
+/*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ cmpl $$0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
+ j${revcmp} 1f
+ movswq 2(rPC), rINSTq # fetch signed displacement
+ testq rINSTq, rINSTq
+ jmp MterpCommonTakenBranch
+1:
+ cmpl $$JIT_CHECK_OSR, rPROFILE
+ je .L_check_not_taken_osr
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_goto():
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
+ testq rINSTq, rINSTq
+ jmp MterpCommonTakenBranch
+
+%def op_goto_16():
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
+ testq rINSTq, rINSTq
+ jmp MterpCommonTakenBranch
+
+%def op_goto_32():
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because we need the SF bit set, we'll use an adds
+ * to convert from Dalvik offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
+ testq rINSTq, rINSTq
+ jmp MterpCommonTakenBranch
+
+%def op_if_eq():
+% bincmp(revcmp="ne")
+
+%def op_if_eqz():
+% zcmp(revcmp="ne")
+
+%def op_if_ge():
+% bincmp(revcmp="l")
+
+%def op_if_gez():
+% zcmp(revcmp="l")
+
+%def op_if_gt():
+% bincmp(revcmp="le")
+
+%def op_if_gtz():
+% zcmp(revcmp="le")
+
+%def op_if_le():
+% bincmp(revcmp="g")
+
+%def op_if_lez():
+% zcmp(revcmp="g")
+
+%def op_if_lt():
+% bincmp(revcmp="ge")
+
+%def op_if_ltz():
+% zcmp(revcmp="ge")
+
+%def op_if_ne():
+% bincmp(revcmp="e")
+
+%def op_if_nez():
+% zcmp(revcmp="e")
+
+%def op_packed_switch(func="MterpDoPackedSwitch"):
+/*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # OUT_ARG0 <- ssssssssBBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # OUT_ARG0 <- PC + ssssssssBBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_32_ARG1 <- vAA
+ call SYMBOL($func)
+ testl %eax, %eax
+ movslq %eax, rINSTq
+ jmp MterpCommonTakenBranch
+
+%def op_return():
+/*
+ * Return a 32-bit value.
+ *
+ * for: return, return-object
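+ *
+ * A pending suspend or checkpoint request is serviced here before the
+ * result is handed off to MterpReturn.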
+ */
+ /* op vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movq rSELF, OUT_ARG0
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ jz 1f
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_VREG %eax, rINSTq # eax <- vAA
+ jmp MterpReturn
+
+%def op_return_object():
+% op_return()
+
+%def op_return_void():
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movq rSELF, OUT_ARG0
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ jz 1f
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
+
+%def op_return_void_no_barrier():
+ movq rSELF, OUT_ARG0
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ jz 1f
+ call SYMBOL(MterpSuspendCheck)
+1:
+ xorq %rax, %rax
+ jmp MterpReturn
+
+%def op_return_wide():
+/*
+ * Return a 64-bit value.
+ */
+ /* return-wide vAA */
+ .extern MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
+ movq rSELF, OUT_ARG0
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
+ jz 1f
+ call SYMBOL(MterpSuspendCheck)
+1:
+ GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
+ jmp MterpReturn
+
+%def op_sparse_switch():
+% op_packed_switch(func="MterpDoSparseSwitch")
+
+%def op_throw():
+/*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ EXPORT_PC
+ GET_VREG %eax, rINSTq # eax<- vAA (exception object)
+    testl   %eax, %eax                      # null check the exception object
+ jz common_errNullObject
+ movq rSELF, %rcx
+ movq %rax, THREAD_EXCEPTION_OFFSET(%rcx)
+ jmp MterpException
diff --git a/runtime/interpreter/mterp/x86_64/cvtfp_int.S b/runtime/interpreter/mterp/x86_64/cvtfp_int.S
deleted file mode 100644
index 1472bd2..0000000
--- a/runtime/interpreter/mterp/x86_64/cvtfp_int.S
+++ /dev/null
@@ -1,27 +0,0 @@
-%default {"fp_suffix":"","i_suffix":"","max_const":"","result_reg":"","wide":""}
-/* On fp to int conversions, Java requires that
- * if the result > maxint, it should be clamped to maxint. If it is less
- * than minint, it should be clamped to minint. If it is a nan, the result
- * should be zero. Further, the rounding mode is to truncate.
- */
- /* float/double to int/long vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
- mov${i_suffix} ${max_const}, ${result_reg}
- cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
- comis${fp_suffix} %xmm1, %xmm0
- jae 1f
- jp 2f
- cvtts${fp_suffix}2si${i_suffix} %xmm0, ${result_reg}
- jmp 1f
-2:
- xor${i_suffix} ${result_reg}, ${result_reg}
-1:
- .if $wide
- SET_WIDE_VREG ${result_reg}, %rcx
- .else
- SET_VREG ${result_reg}, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/entry.S b/runtime/interpreter/mterp/x86_64/entry.S
deleted file mode 100644
index b08419b..0000000
--- a/runtime/interpreter/mterp/x86_64/entry.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
- .text
- ASM_HIDDEN SYMBOL(ExecuteMterpImpl)
- .global SYMBOL(ExecuteMterpImpl)
- FUNCTION_TYPE(ExecuteMterpImpl)
-
-/*
- * On entry:
- * 0 Thread* self
- * 1 insns_
- * 2 ShadowFrame
- * 3 JValue* result_register
- *
- */
-
-SYMBOL(ExecuteMterpImpl):
- .cfi_startproc
- .cfi_def_cfa rsp, 8
-
- /* Spill callee save regs */
- PUSH %rbx
- PUSH %rbp
- PUSH %r12
- PUSH %r13
- PUSH %r14
- PUSH %r15
-
- /* Allocate frame */
- subq $$FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset FRAME_SIZE
-
- /* Remember the return register */
- movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
-
- /* Remember the code_item */
- movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
-
- /* set up "named" registers */
- movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
- leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
- leaq (rFP, %rax, 4), rREFS
- movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
- leaq (IN_ARG1, %rax, 2), rPC
- CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
- EXPORT_PC
-
- /* Starting ibase */
- movq IN_ARG0, rSELF
- REFRESH_IBASE_REG IN_ARG0
-
- /* Set up for backwards branches & osr profiling */
- movq IN_ARG0, OUT_ARG2 /* Set up OUT_ARG2 before clobbering IN_ARG0 */
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpSetUpHotnessCountdown)
- movswl %ax, rPROFILE
-
- /* start executing the instruction at rPC */
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
diff --git a/runtime/interpreter/mterp/x86_64/fallback.S b/runtime/interpreter/mterp/x86_64/fallback.S
deleted file mode 100644
index 8d61166..0000000
--- a/runtime/interpreter/mterp/x86_64/fallback.S
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Transfer stub to alternate interpreter */
- jmp MterpFallback
-
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64/floating_point.S
new file mode 100644
index 0000000..b40c0e6
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/floating_point.S
@@ -0,0 +1,236 @@
+%def fpcmp(suff="d", nanval="pos"):
+/*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return nanval ? 1 : -1;
+ * }
+ * }
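+ *
+ * ucomis sets PF when the operands are unordered (NaN), so the jp below
+ * routes NaN to the nanval result before the ordered checks.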
+ */
+ /* op vAA, vBB, vCC */
+ movzbq 3(rPC), %rcx # ecx<- CC
+ movzbq 2(rPC), %rax # eax<- BB
+ movs${suff} VREG_ADDRESS(%rax), %xmm0
+ xor %eax, %eax
+ ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
+ jp .L${opcode}_nan_is_${nanval}
+ je .L${opcode}_finish
+ jb .L${opcode}_less
+.L${opcode}_nan_is_pos:
+ addb $$1, %al
+ jmp .L${opcode}_finish
+.L${opcode}_nan_is_neg:
+.L${opcode}_less:
+ movl $$-1, %eax
+.L${opcode}_finish:
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def fpcvt(source_suffix="", dest_suffix="", wide=""):
+/*
+ * Generic 32-bit FP conversion operation.
+ */
+ /* unop vA, vB */
+ movl rINST, %ecx # rcx <- A+
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ cvts${source_suffix}2s${dest_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ .if $wide
+ movsd %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_WIDE_REF %rcx
+ .else
+ movss %xmm0, VREG_ADDRESS(%rcx)
+ CLEAR_REF %rcx
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def sseBinop(instr="", suff=""):
+ movzbq 2(rPC), %rcx # ecx <- BB
+ movzbq 3(rPC), %rax # eax <- CC
+ movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def sseBinop2Addr(instr="", suff=""):
+ movl rINST, %ecx # ecx <- A+
+ andl $$0xf, %ecx # ecx <- A
+ movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ sarl $$4, rINST # rINST<- B
+ ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
+ movs${suff} %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ pxor %xmm0, %xmm0
+ movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_add_double():
+% sseBinop(instr="adds", suff="d")
+
+%def op_add_double_2addr():
+% sseBinop2Addr(instr="adds", suff="d")
+
+%def op_add_float():
+% sseBinop(instr="adds", suff="s")
+
+%def op_add_float_2addr():
+% sseBinop2Addr(instr="adds", suff="s")
+
+%def op_cmpg_double():
+% fpcmp(suff="d", nanval="pos")
+
+%def op_cmpg_float():
+% fpcmp(suff="s", nanval="pos")
+
+%def op_cmpl_double():
+% fpcmp(suff="d", nanval="neg")
+
+%def op_cmpl_float():
+% fpcmp(suff="s", nanval="neg")
+
+%def op_div_double():
+% sseBinop(instr="divs", suff="d")
+
+%def op_div_double_2addr():
+% sseBinop2Addr(instr="divs", suff="d")
+
+%def op_div_float():
+% sseBinop(instr="divs", suff="s")
+
+%def op_div_float_2addr():
+% sseBinop2Addr(instr="divs", suff="s")
+
+%def op_double_to_float():
+% fpcvt(source_suffix="d", dest_suffix="s", wide="0")
+
+%def op_double_to_int():
+% cvtfp_int(fp_suffix="d", i_suffix="l", max_const="$0x7fffffff", result_reg="%eax", wide="0")
+
+%def op_double_to_long():
+% cvtfp_int(fp_suffix="d", i_suffix="q", max_const="$0x7fffffffffffffff", result_reg="%rax", wide="1")
+
+%def op_float_to_double():
+% fpcvt(source_suffix="s", dest_suffix="d", wide="1")
+
+%def op_float_to_int():
+% cvtfp_int(fp_suffix="s", i_suffix="l", max_const="$0x7fffffff", result_reg="%eax", wide="0")
+
+%def op_float_to_long():
+% cvtfp_int(fp_suffix="s", i_suffix="q", max_const="$0x7fffffffffffffff", result_reg="%rax", wide="1")
+
+%def op_int_to_double():
+% fpcvt(source_suffix="i", dest_suffix="dl", wide="1")
+
+%def op_int_to_float():
+% fpcvt(source_suffix="i", dest_suffix="sl", wide="0")
+
+%def op_long_to_double():
+% fpcvt(source_suffix="i", dest_suffix="dq", wide="1")
+
+%def op_long_to_float():
+% fpcvt(source_suffix="i", dest_suffix="sq", wide="0")
+
+%def op_mul_double():
+% sseBinop(instr="muls", suff="d")
+
+%def op_mul_double_2addr():
+% sseBinop2Addr(instr="muls", suff="d")
+
+%def op_mul_float():
+% sseBinop(instr="muls", suff="s")
+
+%def op_mul_float_2addr():
+% sseBinop2Addr(instr="muls", suff="s")
+
+%def op_neg_double():
+% unop(preinstr=" movq $0x8000000000000000, %rsi", instr=" xorq %rsi, %rax", wide="1")
+
+%def op_neg_float():
+% unop(instr=" xorl $0x80000000, %eax")
+
+%def op_rem_double():
+ /* rem_double vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    movzbq  2(rPC), %rax                    # rax <- BB
+    fldl    VREG_ADDRESS(%rcx)              # %st1 <- fp[vCC]
+    fldl    VREG_ADDRESS(%rax)              # %st0 <- fp[vBB]
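+    /* fprem computes a partial remainder and keeps C2 set until the
+       reduction is complete; fstsw+sahf map C2 to PF, so jp loops back
+       until it is done. */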
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
+ CLEAR_WIDE_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_double_2addr():
+ /* rem_double/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ fldl VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ fldl VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstpl VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_WIDE_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_rem_float():
+ /* rem_float vAA, vBB, vCC */
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    movzbq  2(rPC), %rax                    # rax <- BB
+    flds    VREG_ADDRESS(%rcx)              # vCC to fp stack
+    flds    VREG_ADDRESS(%rax)              # vBB to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(rINSTq) # %st to vAA
+ CLEAR_REF rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_rem_float_2addr():
+ /* rem_float/2addr vA, vB */
+ movzbq rINSTbl, %rcx # ecx <- A+
+ sarl $$4, rINST # rINST <- B
+ flds VREG_ADDRESS(rINSTq) # vB to fp stack
+ andb $$0xf, %cl # ecx <- A
+ flds VREG_ADDRESS(%rcx) # vA to fp stack
+1:
+ fprem
+ fstsw %ax
+ sahf
+ jp 1b
+ fstp %st(1)
+ fstps VREG_ADDRESS(%rcx) # %st to vA
+ CLEAR_REF %rcx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_sub_double():
+% sseBinop(instr="subs", suff="d")
+
+%def op_sub_double_2addr():
+% sseBinop2Addr(instr="subs", suff="d")
+
+%def op_sub_float():
+% sseBinop(instr="subs", suff="s")
+
+%def op_sub_float_2addr():
+% sseBinop2Addr(instr="subs", suff="s")
diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S
deleted file mode 100644
index 3cc7532..0000000
--- a/runtime/interpreter/mterp/x86_64/footer.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * ===========================================================================
- * Common subroutines and data
- * ===========================================================================
- */
-
- .text
- .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogDivideByZeroException)
-#endif
- jmp MterpCommonFallback
-
-common_errArrayIndex:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogArrayIndexException)
-#endif
- jmp MterpCommonFallback
-
-common_errNegativeArraySize:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNegativeArraySizeException)
-#endif
- jmp MterpCommonFallback
-
-common_errNoSuchMethod:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNoSuchMethodException)
-#endif
- jmp MterpCommonFallback
-
-common_errNullObject:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogNullObjectException)
-#endif
- jmp MterpCommonFallback
-
-common_exceptionThrown:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogExceptionThrownException)
-#endif
- jmp MterpCommonFallback
-
-MterpSuspendFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
- call SYMBOL(MterpLogSuspendFallback)
-#endif
- jmp MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary. If there is a pending
- * exception, handle it. Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jz MterpFallback
- /* intentional fallthrough - handle pending exception. */
-
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpHandleException)
- testb %al, %al
- jz MterpExceptionReturn
- movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
- mov OFF_FP_DEX_PC(rFP), %ecx
- leaq (%rax, %rcx, 2), rPC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
- /* Do we need to switch interpreters? */
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- /* resume execution at catch block */
- REFRESH_IBASE
- FETCH_INST
- GOTO_NEXT
- /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- * rINST <= signed offset
- * rPROFILE <= signed hotness countdown (expanded to 32 bits)
- * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- * If profiling active, do hotness countdown and report if we hit zero.
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- * Is there a pending suspend request? If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranch:
- jg .L_forward_branch # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-# error "JIT_CHECK_OSR must be -1."
-#endif
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_osr_check
- decl rPROFILE
- je .L_add_batch # counted down to zero - report
-.L_resume_backward_branch:
- movq rSELF, %rax
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
- REFRESH_IBASE_REG %rax
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- jnz .L_suspend_request_pending
- GOTO_NEXT
-
-.L_suspend_request_pending:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- call SYMBOL(MterpSuspendCheck) # (self)
- testb %al, %al
- jnz MterpFallback
- REFRESH_IBASE # might have changed during suspend
- GOTO_NEXT
-
-.L_no_count_backwards:
- cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- jne .L_resume_backward_branch
-.L_osr_check:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_backward_branch
- jmp MterpOnStackReplacement
-
-.L_forward_branch:
- cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
- je .L_check_osr_forward
-.L_resume_forward_branch:
- leaq (rPC, rINSTq, 2), rPC
- FETCH_INST
- GOTO_NEXT
-
-.L_check_osr_forward:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jz .L_resume_forward_branch
- jmp MterpOnStackReplacement
-
-.L_add_batch:
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movswl %ax, rPROFILE
- jmp .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl $$2, OUT_32_ARG2
- call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
- testb %al, %al
- jnz MterpOnStackReplacement
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpLogOSR)
-#endif
- movl $$1, %eax
- jmp MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
- EXPORT_PC
-#if MTERP_LOGGING
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- call SYMBOL(MterpLogFallback)
-#endif
-MterpCommonFallback:
- xorl %eax, %eax
- jmp MterpDone
-
-/*
- * On entry:
- * uint32_t* rFP (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
- movl $$1, %eax
- jmp MterpDone
-MterpReturn:
- movq OFF_FP_RESULT_REGISTER(rFP), %rdx
- movq %rax, (%rdx)
- movl $$1, %eax
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
- * checking for OSR. If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
- testl rPROFILE, rPROFILE
- jle MRestoreFrame # if > 0, we may have some counts to report.
-
- movl %eax, rINST # stash return value
- /* Report cached hotness counts */
- movl rPROFILE, %eax
- movq OFF_FP_METHOD(rFP), OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
- movq rSELF, OUT_ARG2
- call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
- movl rINST, %eax # restore return value
-
- /* pop up frame */
-MRestoreFrame:
- addq $$FRAME_SIZE, %rsp
- .cfi_adjust_cfa_offset -FRAME_SIZE
-
- /* Restore callee save register */
- POP %r15
- POP %r14
- POP %r13
- POP %r12
- POP %rbp
- POP %rbx
- ret
- .cfi_endproc
- SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86_64/fpcmp.S b/runtime/interpreter/mterp/x86_64/fpcmp.S
deleted file mode 100644
index 806bc2b..0000000
--- a/runtime/interpreter/mterp/x86_64/fpcmp.S
+++ /dev/null
@@ -1,35 +0,0 @@
-%default {"suff":"d","nanval":"pos"}
-/*
- * Compare two floating-point values. Puts 0, 1, or -1 into the
- * destination register based on the results of the comparison.
- *
- * int compare(x, y) {
- * if (x == y) {
- * return 0;
- * } else if (x < y) {
- * return -1;
- * } else if (x > y) {
- * return 1;
- * } else {
- * return nanval ? 1 : -1;
- * }
- * }
- */
- /* op vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx<- CC
- movzbq 2(rPC), %rax # eax<- BB
- movs${suff} VREG_ADDRESS(%rax), %xmm0
- xor %eax, %eax
- ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
- jp .L${opcode}_nan_is_${nanval}
- je .L${opcode}_finish
- jb .L${opcode}_less
-.L${opcode}_nan_is_pos:
- addb $$1, %al
- jmp .L${opcode}_finish
-.L${opcode}_nan_is_neg:
-.L${opcode}_less:
- movl $$-1, %eax
-.L${opcode}_finish:
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/fpcvt.S b/runtime/interpreter/mterp/x86_64/fpcvt.S
deleted file mode 100644
index 657869e..0000000
--- a/runtime/interpreter/mterp/x86_64/fpcvt.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default {"source_suffix":"","dest_suffix":"","wide":""}
-/*
- * Generic 32-bit FP conversion operation.
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- cvts${source_suffix}2s${dest_suffix} VREG_ADDRESS(rINSTq), %xmm0
- .if $wide
- movsd %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_WIDE_REF %rcx
- .else
- movss %xmm0, VREG_ADDRESS(%rcx)
- CLEAR_REF %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S
deleted file mode 100644
index 0332ce2..0000000
--- a/runtime/interpreter/mterp/x86_64/header.S
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- Art assembly interpreter notes:
-
- First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
- handle invoke, allows higher-level code to create frame & shadow frame.
-
- Once that's working, support direct entry code & eliminate shadow frame (and
- excess locals allocation.
-
- Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
- base of the vreg array within the shadow frame. Access the other fields,
- dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
- the shadow frame mechanism of double-storing object references - via rFP &
- number_of_vregs_.
-
- */
-
-/*
-x86_64 ABI general notes:
-
-Caller save set:
- rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
-Callee save set:
- rbx, rbp, r12-r15
-Return regs:
- 32-bit in eax
- 64-bit in rax
- fp on xmm0
-
-First 8 fp parameters came in xmm0-xmm7.
-First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
-Other parameters passed on stack, pushed right-to-left. On entry to target, first
-param is at 8(%esp). Traditional entry code is:
-
-Stack must be 16-byte aligned to support SSE in native code.
-
-If we're not doing variable stack allocation (alloca), the frame pointer can be
-eliminated and all arg references adjusted to be esp relative.
-*/
-
-/*
-Mterp and x86_64 notes:
-
-Some key interpreter variables will be assigned to registers.
-
- nick reg purpose
- rPROFILE rbp countdown register for jit profiling
- rPC r12 interpreted program counter, used for fetching instructions
- rFP r13 interpreted frame pointer, used for accessing locals and args
- rINSTw bx first 16-bit code of current instruction
- rINSTbl bl opcode portion of instruction word
- rINSTbh bh high byte of inst word, usually contains src/tgt reg names
- rIBASE r14 base of instruction handler table
- rREFS r15 base of object references in shadow frame.
-
-Notes:
- o High order 16 bits of ebx must be zero on entry to handler
- o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
- o eax and ecx are scratch, rINSTw/ebx sometimes scratch
-
-Macros are provided for common operations. Each macro MUST emit only
-one instruction to make instruction-counting easier. They MUST NOT alter
-unspecified registers or condition codes.
-*/
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Handle mac compiler specific
- */
-#if defined(__APPLE__)
- #define MACRO_LITERAL(value) $$(value)
- #define FUNCTION_TYPE(name)
- #define OBJECT_TYPE(name)
- #define SIZE(start,end)
- // Mac OS' symbols have an _ prefix.
- #define SYMBOL(name) _ ## name
- #define ASM_HIDDEN .private_extern
-#else
- #define MACRO_LITERAL(value) $$value
- #define FUNCTION_TYPE(name) .type name, @function
- #define OBJECT_TYPE(name) .type name, @object
- #define SIZE(start,end) .size start, .-end
- #define SYMBOL(name) name
- #define ASM_HIDDEN .hidden
-#endif
-
-.macro PUSH _reg
- pushq \_reg
- .cfi_adjust_cfa_offset 8
- .cfi_rel_offset \_reg, 0
-.endm
-
-.macro POP _reg
- popq \_reg
- .cfi_adjust_cfa_offset -8
- .cfi_restore \_reg
-.endm
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
- * to access other shadow frame fields, we need to use a backwards offset. Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
-#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
-
-/* Frame size must be 16-byte aligned.
- * Remember about 8 bytes for return address + 6 * 8 for spills.
- */
-#define FRAME_SIZE 8
-
-/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
-#define IN_ARG3 %rcx
-#define IN_ARG2 %rdx
-#define IN_ARG1 %rsi
-#define IN_ARG0 %rdi
-/* Spill offsets relative to %esp */
-#define SELF_SPILL (FRAME_SIZE - 8)
-/* Out Args */
-#define OUT_ARG3 %rcx
-#define OUT_ARG2 %rdx
-#define OUT_ARG1 %rsi
-#define OUT_ARG0 %rdi
-#define OUT_32_ARG3 %ecx
-#define OUT_32_ARG2 %edx
-#define OUT_32_ARG1 %esi
-#define OUT_32_ARG0 %edi
-#define OUT_FP_ARG1 %xmm1
-#define OUT_FP_ARG0 %xmm0
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rSELF SELF_SPILL(%rsp)
-#define rPC %r12
-#define CFI_DEX 12 // DWARF register number of the register holding dex-pc (rPC).
-#define CFI_TMP 5 // DWARF register number of the first argument register (rdi).
-#define rFP %r13
-#define rINST %ebx
-#define rINSTq %rbx
-#define rINSTw %bx
-#define rINSTbh %bh
-#define rINSTbl %bl
-#define rIBASE %r14
-#define rREFS %r15
-#define rPROFILE %ebp
-
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array. For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
- movq rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- * IBase handles uses the caller save register so we must restore it after each call.
- * Also it is used as a result of some 64-bit operations (like imul) and we should
- * restore it in such cases also.
- *
- */
-.macro REFRESH_IBASE_REG self_reg
- movq THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
-.endm
-.macro REFRESH_IBASE
- movq rSELF, rIBASE
- REFRESH_IBASE_REG rIBASE
-.endm
-
-/*
- * Refresh rINST.
- * At enter to handler rINST does not contain the opcode number.
- * However some utilities require the full value, so this macro
- * restores the opcode number.
- */
-.macro REFRESH_INST _opnum
- movb rINSTbl, rINSTbh
- movb $$\_opnum, rINSTbl
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
- */
-.macro FETCH_INST
- movzwq (rPC), rINSTq
-.endm
-
-/*
- * Remove opcode from rINST, compute the address of handler and jump to it.
- */
-.macro GOTO_NEXT
- movzx rINSTbl,%eax
- movzbl rINSTbh,rINST
- shll MACRO_LITERAL(${handler_size_bits}), %eax
- addq rIBASE, %rax
- jmp *%rax
-.endm
-
-/*
- * Advance rPC by instruction count.
- */
-.macro ADVANCE_PC _count
- leaq 2*\_count(rPC), rPC
-.endm
-
-/*
- * Advance rPC by instruction count, fetch instruction and jump to handler.
- */
-.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
- ADVANCE_PC \_count
- FETCH_INST
- GOTO_NEXT
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
-#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
-
-.macro GET_VREG _reg _vreg
- movl (rFP,\_vreg,4), \_reg
-.endm
-
-/* Read wide value. */
-.macro GET_WIDE_VREG _reg _vreg
- movq (rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-/* Write wide value. reg is clobbered. */
-.macro SET_WIDE_VREG _reg _vreg
- movq \_reg, (rFP,\_vreg,4)
- xorq \_reg, \_reg
- movq \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro SET_VREG_OBJECT _reg _vreg
- movl \_reg, (rFP,\_vreg,4)
- movl \_reg, (rREFS,\_vreg,4)
-.endm
-
-.macro GET_VREG_HIGH _reg _vreg
- movl 4(rFP,\_vreg,4), \_reg
-.endm
-
-.macro SET_VREG_HIGH _reg _vreg
- movl \_reg, 4(rFP,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
-.endm
-
-.macro CLEAR_WIDE_REF _vreg
- movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
- movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
-.endm
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end.S b/runtime/interpreter/mterp/x86_64/instruction_end.S
deleted file mode 100644
index 94587f8..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
- .global SYMBOL(artMterpAsmInstructionEnd)
-SYMBOL(artMterpAsmInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S b/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
deleted file mode 100644
index 7757bce..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end_alt.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmAltInstructionEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionEnd)
- .global SYMBOL(artMterpAsmAltInstructionEnd)
-SYMBOL(artMterpAsmAltInstructionEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S b/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
deleted file mode 100644
index 8eb79ac..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_end_sister.S
+++ /dev/null
@@ -1,5 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmSisterEnd)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterEnd)
- .global SYMBOL(artMterpAsmSisterEnd)
-SYMBOL(artMterpAsmSisterEnd):
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start.S b/runtime/interpreter/mterp/x86_64/instruction_start.S
deleted file mode 100644
index 5d29a819..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
- .global SYMBOL(artMterpAsmInstructionStart)
-SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
- .text
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S b/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
deleted file mode 100644
index 8dcf5bf..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start_alt.S
+++ /dev/null
@@ -1,6 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmAltInstructionStart)
- ASM_HIDDEN SYMBOL(artMterpAsmAltInstructionStart)
- .global SYMBOL(artMterpAsmAltInstructionStart)
- .text
-SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
diff --git a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S b/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
deleted file mode 100644
index 796e98b..0000000
--- a/runtime/interpreter/mterp/x86_64/instruction_start_sister.S
+++ /dev/null
@@ -1,7 +0,0 @@
-
- OBJECT_TYPE(artMterpAsmSisterStart)
- ASM_HIDDEN SYMBOL(artMterpAsmSisterStart)
- .global SYMBOL(artMterpAsmSisterStart)
- .text
- .balign 4
-SYMBOL(artMterpAsmSisterStart):
diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S
index f7e6155..63c233c 100644
--- a/runtime/interpreter/mterp/x86_64/invoke.S
+++ b/runtime/interpreter/mterp/x86_64/invoke.S
@@ -1,4 +1,4 @@
-%default { "helper":"UndefinedInvokeHandler" }
+%def invoke(helper="UndefinedInvokeHandler"):
/*
* Generic invoke handler wrapper.
*/
@@ -20,3 +20,96 @@
jnz MterpFallback
FETCH_INST
GOTO_NEXT
+
+%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
+ /*
+ * invoke-polymorphic handler wrapper.
+ */
+ /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
+ .extern $helper
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG3
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC 4
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ FETCH_INST
+ GOTO_NEXT
+
+%def op_invoke_custom():
+% invoke(helper="MterpInvokeCustom")
+
+%def op_invoke_custom_range():
+% invoke(helper="MterpInvokeCustomRange")
+
+%def op_invoke_direct():
+% invoke(helper="MterpInvokeDirect")
+
+%def op_invoke_direct_range():
+% invoke(helper="MterpInvokeDirectRange")
+
+%def op_invoke_interface():
+% invoke(helper="MterpInvokeInterface")
+/*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_interface_range():
+% invoke(helper="MterpInvokeInterfaceRange")
+
+%def op_invoke_polymorphic():
+% invoke_polymorphic(helper="MterpInvokePolymorphic")
+
+%def op_invoke_polymorphic_range():
+% invoke_polymorphic(helper="MterpInvokePolymorphicRange")
+
+%def op_invoke_static():
+% invoke(helper="MterpInvokeStatic")
+
+
+%def op_invoke_static_range():
+% invoke(helper="MterpInvokeStaticRange")
+
+%def op_invoke_super():
+% invoke(helper="MterpInvokeSuper")
+/*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_super_range():
+% invoke(helper="MterpInvokeSuperRange")
+
+%def op_invoke_virtual():
+% invoke(helper="MterpInvokeVirtual")
+/*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+
+%def op_invoke_virtual_quick():
+% invoke(helper="MterpInvokeVirtualQuick")
+
+%def op_invoke_virtual_range():
+% invoke(helper="MterpInvokeVirtualRange")
+
+%def op_invoke_virtual_range_quick():
+% invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
deleted file mode 100644
index 5157860..0000000
--- a/runtime/interpreter/mterp/x86_64/invoke_polymorphic.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default { "helper":"UndefinedInvokeHandler" }
- /*
- * invoke-polymorphic handler wrapper.
- */
- /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
- .extern $helper
- EXPORT_PC
- movq rSELF, OUT_ARG0
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
- movq rPC, OUT_ARG2
- REFRESH_INST ${opnum}
- movl rINST, OUT_32_ARG3
- call SYMBOL($helper)
- testb %al, %al
- jz MterpException
- ADVANCE_PC 4
- call SYMBOL(MterpShouldSwitchInterpreters)
- testb %al, %al
- jnz MterpFallback
- FETCH_INST
- GOTO_NEXT
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
new file mode 100644
index 0000000..e283bbe
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -0,0 +1,744 @@
+%def header():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ Art assembly interpreter notes:
+
+ First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
+ handle invoke, allows higher-level code to create frame & shadow frame).
+
+ Once that's working, support direct entry code & eliminate shadow frame (and
+ excess locals allocation).
+
+ Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
+ base of the vreg array within the shadow frame. Access the other fields,
+ dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
+ the shadow frame mechanism of double-storing object references - via rFP &
+ number_of_vregs_.
+
+ */
+
+/*
+x86_64 ABI general notes:
+
+Caller save set:
+ rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+Callee save set:
+ rbx, rbp, r12-r15
+Return regs:
+ 32-bit in eax
+ 64-bit in rax
+ fp on xmm0
+
+First 8 fp parameters are passed in xmm0-xmm7.
+First 6 non-fp parameters are passed in rdi, rsi, rdx, rcx, r8, r9.
+Other parameters are passed on the stack, pushed right-to-left. On entry to the target,
+the first param is at 8(%rsp).
+
+Stack must be 16-byte aligned to support SSE in native code.
+
+If we're not doing variable stack allocation (alloca), the frame pointer can be
+eliminated and all arg references adjusted to be esp relative.
+*/
+
+/*
+Mterp and x86_64 notes:
+
+Some key interpreter variables will be assigned to registers.
+
+ nick reg purpose
+ rPROFILE rbp countdown register for jit profiling
+ rPC r12 interpreted program counter, used for fetching instructions
+ rFP r13 interpreted frame pointer, used for accessing locals and args
+ rINSTw bx first 16-bit code of current instruction
+ rINSTbl bl opcode portion of instruction word
+ rINSTbh bh high byte of inst word, usually contains src/tgt reg names
+ rIBASE r14 base of instruction handler table
+ rREFS r15 base of object references in shadow frame.
+
+Notes:
+ o High order 16 bits of ebx must be zero on entry to handler
+ o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
+ o eax and ecx are scratch, rINSTw/ebx sometimes scratch
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "interpreter/cfi_asm_support.h"
+
+/*
+ * Handle Mac compiler specifics
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $$(value)
+ #define FUNCTION_TYPE(name)
+ #define OBJECT_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+ #define ASM_HIDDEN .private_extern
+#else
+ #define MACRO_LITERAL(value) $$value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define OBJECT_TYPE(name) .type name, @object
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+ #define ASM_HIDDEN .hidden
+#endif
+
+.macro PUSH _reg
+ pushq \_reg
+ .cfi_adjust_cfa_offset 8
+ .cfi_rel_offset \_reg, 0
+.endm
+
+.macro POP _reg
+ popq \_reg
+ .cfi_adjust_cfa_offset -8
+ .cfi_restore \_reg
+.endm
+
+/*
+ * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
+ * to access other shadow frame fields, we need to use a backwards offset. Define those here.
+ */
+#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
+#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
+#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
+#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
+#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
+#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
+#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
+#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
+#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
+#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
+
+/* Frame size must be 16-byte aligned.
+ * Remember to account for 8 bytes for the return address + 6 * 8 for spills.
+ */
+#define FRAME_SIZE 8
+
+/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
+#define IN_ARG3 %rcx
+#define IN_ARG2 %rdx
+#define IN_ARG1 %rsi
+#define IN_ARG0 %rdi
+/* Spill offsets relative to %rsp */
+#define SELF_SPILL (FRAME_SIZE - 8)
+/* Out Args */
+#define OUT_ARG3 %rcx
+#define OUT_ARG2 %rdx
+#define OUT_ARG1 %rsi
+#define OUT_ARG0 %rdi
+#define OUT_32_ARG3 %ecx
+#define OUT_32_ARG2 %edx
+#define OUT_32_ARG1 %esi
+#define OUT_32_ARG0 %edi
+#define OUT_FP_ARG1 %xmm1
+#define OUT_FP_ARG0 %xmm0
+
+/* During bringup, we'll use the shadow frame model instead of rFP */
+/* single-purpose registers, given names for clarity */
+#define rSELF SELF_SPILL(%rsp)
+#define rPC %r12
+#define CFI_DEX 12 // DWARF register number of the register holding dex-pc (rPC).
+#define CFI_TMP 5 // DWARF register number of the first argument register (rdi).
+#define rFP %r13
+#define rINST %ebx
+#define rINSTq %rbx
+#define rINSTw %bx
+#define rINSTbh %bh
+#define rINSTbl %bl
+#define rIBASE %r14
+#define rREFS %r15
+#define rPROFILE %ebp
+
+#define MTERP_LOGGING 0
+
+/*
+ * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * It's okay to do this more than once.
+ *
+ * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
+ * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
+ * offset into the code_items_[] array. For efficiency, we will "export" the
+ * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
+ * to convert to a dex pc when needed.
+ */
+.macro EXPORT_PC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+.endm
+
+/*
+ * Refresh handler table.
+ * The IBASE handler table is kept in a caller-save register, so we must restore it after
+ * each call. It is also clobbered as a result of some 64-bit operations (like imul), so we
+ * should restore it in such cases as well.
+ *
+ */
+.macro REFRESH_IBASE_REG self_reg
+ movq THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
+.endm
+.macro REFRESH_IBASE
+ movq rSELF, rIBASE
+ REFRESH_IBASE_REG rIBASE
+.endm
+
+/*
+ * Refresh rINST.
+ * On entry to a handler, rINST does not contain the opcode number.
+ * However, some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+ movb rINSTbl, rINSTbh
+ movb $$\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw. Does not advance rPC.
+ */
+.macro FETCH_INST
+ movzwq (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ */
+.macro GOTO_NEXT
+ movzx rINSTbl,%eax
+ movzbl rINSTbh,rINST
+ shll MACRO_LITERAL(${handler_size_bits}), %eax
+ addq rIBASE, %rax
+ jmp *%rax
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+ leaq 2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+ ADVANCE_PC \_count
+ FETCH_INST
+ GOTO_NEXT
+.endm
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
+
+.macro GET_VREG _reg _vreg
+ movl VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+ movq VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG _reg _vreg
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+ movq \_reg, VREG_ADDRESS(\_vreg)
+ xorq \_reg, \_reg
+ movq \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro SET_VREG_OBJECT _reg _vreg
+ movl \_reg, VREG_ADDRESS(\_vreg)
+ movl \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+ movl VREG_HIGH_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+ movl \_reg, VREG_HIGH_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_REF _vreg
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+ movl MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+ movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+/*
+ * function support macros.
+ */
+.macro ENTRY name
+ .text
+ ASM_HIDDEN SYMBOL(\name)
+ .global SYMBOL(\name)
+ FUNCTION_TYPE(\name)
+SYMBOL(\name):
+.endm
+
+.macro END name
+ SIZE(\name,\name)
+.endm
+
+%def entry():
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ *
+ * On entry:
+ * 0 Thread* self
+ * 1 insns_
+ * 2 ShadowFrame
+ * 3 JValue* result_register
+ *
+ */
+
+ENTRY ExecuteMterpImpl
+ .cfi_startproc
+ .cfi_def_cfa rsp, 8
+
+ /* Spill callee save regs */
+ PUSH %rbx
+ PUSH %rbp
+ PUSH %r12
+ PUSH %r13
+ PUSH %r14
+ PUSH %r15
+
+ /* Allocate frame */
+ subq $$FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset FRAME_SIZE
+
+ /* Remember the return register */
+ movq IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)
+
+ /* Remember the code_item */
+ movq IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)
+
+ /* set up "named" registers */
+ movl SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
+ leaq SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
+ leaq (rFP, %rax, 4), rREFS
+ movl SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
+ leaq (IN_ARG1, %rax, 2), rPC
+ CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+ EXPORT_PC
+
+ /* Starting ibase */
+ movq IN_ARG0, rSELF
+ REFRESH_IBASE_REG IN_ARG0
+
+ /* Set up for backwards branches & osr profiling */
+ movq IN_ARG0, OUT_ARG2 /* Set up OUT_ARG2 before clobbering IN_ARG0 */
+ movq OFF_FP_METHOD(rFP), OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpSetUpHotnessCountdown)
+ movswl %ax, rPROFILE
+
+ /* start executing the instruction at rPC */
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+ // cfi info continues, and covers the whole mterp implementation.
+ END ExecuteMterpImpl
+
+%def dchecks_before_helper():
+ // Call C++ to do debug checks and return to the handler using tail call.
+ .extern MterpCheckBefore
+ popq %rax # Return address (the instruction handler).
+ REFRESH_IBASE
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rPC, OUT_ARG2
+ pushq %rax # Return address for the tail call.
+ jmp SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)
+
+%def opcode_pre():
+% add_helper(dchecks_before_helper, "Mterp_dchecks_before_helper")
+ #if !defined(NDEBUG)
+ call SYMBOL(Mterp_dchecks_before_helper)
+ #endif
+
+%def fallback():
+/* Transfer stub to alternate interpreter */
+ jmp MterpFallback
+
+
+%def helpers():
+ ENTRY MterpHelpers
+
+%def footer():
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+/*
+ * We've detected a condition that will result in an exception, but the exception
+ * has not yet been thrown. Just bail out to the reference interpreter to deal with it.
+ * TUNING: for consistency, we may want to just go ahead and handle these here.
+ */
+common_errDivideByZero:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogDivideByZeroException)
+#endif
+ jmp MterpCommonFallback
+
+common_errArrayIndex:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogArrayIndexException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNegativeArraySize:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNegativeArraySizeException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNoSuchMethod:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNoSuchMethodException)
+#endif
+ jmp MterpCommonFallback
+
+common_errNullObject:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogNullObjectException)
+#endif
+ jmp MterpCommonFallback
+
+common_exceptionThrown:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogExceptionThrownException)
+#endif
+ jmp MterpCommonFallback
+
+MterpSuspendFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
+ call SYMBOL(MterpLogSuspendFallback)
+#endif
+ jmp MterpCommonFallback
+
+/*
+ * If we're here, something is out of the ordinary. If there is a pending
+ * exception, handle it. Otherwise, roll back and retry with the reference
+ * interpreter.
+ */
+MterpPossibleException:
+ movq rSELF, %rcx
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+ jz MterpFallback
+ /* intentional fallthrough - handle pending exception. */
+
+/*
+ * On return from a runtime helper routine, we've found a pending exception.
+ * Can we handle it here - or need to bail out to caller?
+ *
+ */
+MterpException:
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpHandleException)
+ testb %al, %al
+ jz MterpExceptionReturn
+ movq OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
+ mov OFF_FP_DEX_PC(rFP), %ecx
+ leaq (%rax, %rcx, 2), rPC
+ movq rPC, OFF_FP_DEX_PC_PTR(rFP)
+ /* Do we need to switch interpreters? */
+ call SYMBOL(MterpShouldSwitchInterpreters)
+ testb %al, %al
+ jnz MterpFallback
+ /* resume execution at catch block */
+ REFRESH_IBASE
+ FETCH_INST
+ GOTO_NEXT
+ /* NOTE: no fallthrough */
+
+/*
+ * Common handling for branches with support for Jit profiling.
+ * On entry:
+ * rINST <= signed offset
+ * rPROFILE <= signed hotness countdown (expanded to 32 bits)
+ * condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
+ *
+ * We have quite a few different cases for branch profiling, OSR detection and
+ * suspend check support here.
+ *
+ * Taken backward branches:
+ * If profiling active, do hotness countdown and report if we hit zero.
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ * Is there a pending suspend request? If so, suspend.
+ *
+ * Taken forward branches and not-taken backward branches:
+ * If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
+ *
+ * Our most common case is expected to be a taken backward branch with active jit profiling,
+ * but no full OSR check and no pending suspend request.
+ * Next most common case is not-taken branch with no full OSR check.
+ *
+ */
+MterpCommonTakenBranch:
+ jg .L_forward_branch # don't add forward branches to hotness
+/*
+ * We need to subtract 1 from positive values and we should not see 0 here,
+ * so we may use the result of the comparison with -1.
+ */
+#if JIT_CHECK_OSR != -1
+# error "JIT_CHECK_OSR must be -1."
+#endif
+ cmpl $$JIT_CHECK_OSR, rPROFILE
+ je .L_osr_check
+ decl rPROFILE
+ je .L_add_batch # counted down to zero - report
+.L_resume_backward_branch:
+ movq rSELF, %rax
+ testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
+ REFRESH_IBASE_REG %rax
+ leaq (rPC, rINSTq, 2), rPC
+ FETCH_INST
+ jnz .L_suspend_request_pending
+ GOTO_NEXT
+
+.L_suspend_request_pending:
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ call SYMBOL(MterpSuspendCheck) # (self)
+ testb %al, %al
+ jnz MterpFallback
+ REFRESH_IBASE # might have changed during suspend
+ GOTO_NEXT
+
+.L_no_count_backwards:
+ cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
+ jne .L_resume_backward_branch
+.L_osr_check:
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ jz .L_resume_backward_branch
+ jmp MterpOnStackReplacement
+
+.L_forward_branch:
+ cmpl $$JIT_CHECK_OSR, rPROFILE # possible OSR re-entry?
+ je .L_check_osr_forward
+.L_resume_forward_branch:
+ leaq (rPC, rINSTq, 2), rPC
+ FETCH_INST
+ GOTO_NEXT
+
+.L_check_osr_forward:
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ jz .L_resume_forward_branch
+ jmp MterpOnStackReplacement
+
+.L_add_batch:
+ movl rPROFILE, %eax
+ movq OFF_FP_METHOD(rFP), OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
+ movq rSELF, OUT_ARG2
+ call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ movswl %ax, rPROFILE
+ jmp .L_no_count_backwards
+
+/*
+ * Entered from the conditional branch handlers when OSR check request active on
+ * not-taken path. All Dalvik not-taken conditional branch offsets are 2.
+ */
+.L_check_not_taken_osr:
+ EXPORT_PC
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl $$2, OUT_32_ARG2
+ call SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
+ testb %al, %al
+ jnz MterpOnStackReplacement
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+/*
+ * On-stack replacement has happened, and now we've returned from the compiled method.
+ */
+MterpOnStackReplacement:
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpLogOSR)
+#endif
+ movl $$1, %eax
+ jmp MterpDone
+
+/*
+ * Bail out to reference interpreter.
+ */
+MterpFallback:
+ EXPORT_PC
+#if MTERP_LOGGING
+ movq rSELF, OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ call SYMBOL(MterpLogFallback)
+#endif
+MterpCommonFallback:
+ xorl %eax, %eax
+ jmp MterpDone
+
+/*
+ * On entry:
+ * uint32_t* rFP (should still be live, pointer to base of vregs)
+ */
+MterpExceptionReturn:
+ movl $$1, %eax
+ jmp MterpDone
+MterpReturn:
+ movq OFF_FP_RESULT_REGISTER(rFP), %rdx
+ movq %rax, (%rdx)
+ movl $$1, %eax
+MterpDone:
+/*
+ * At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
+ * checking for OSR. If greater than zero, we might have unreported hotness to register
+ * (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
+ * should only reach zero immediately after a hotness decrement, and is then reset to either
+ * a negative special state or the new non-zero countdown value.
+ */
+ testl rPROFILE, rPROFILE
+ jle MRestoreFrame # if > 0, we may have some counts to report.
+
+ movl %eax, rINST # stash return value
+ /* Report cached hotness counts */
+ movl rPROFILE, %eax
+ movq OFF_FP_METHOD(rFP), OUT_ARG0
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
+ movw %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
+ movq rSELF, OUT_ARG2
+ call SYMBOL(MterpAddHotnessBatch) # (method, shadow_frame, self)
+ movl rINST, %eax # restore return value
+
+ /* pop up frame */
+MRestoreFrame:
+ addq $$FRAME_SIZE, %rsp
+ .cfi_adjust_cfa_offset -FRAME_SIZE
+
+ /* Restore callee save register */
+ POP %r15
+ POP %r14
+ POP %r13
+ POP %r12
+ POP %rbp
+ POP %rbx
+ ret
+ .cfi_endproc
+ END MterpHelpers
+
+%def instruction_end():
+
+ OBJECT_TYPE(artMterpAsmInstructionEnd)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
+ .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
+
+%def instruction_start():
+
+ OBJECT_TYPE(artMterpAsmInstructionStart)
+ ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
+ .global SYMBOL(artMterpAsmInstructionStart)
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
+ .text
+
+%def opcode_start():
+ ENTRY Mterp_${opcode}
+%def opcode_end():
+ END Mterp_${opcode}
+%def helper_start(name):
+ ENTRY ${name}
+%def helper_end(name):
+ END ${name}
diff --git a/runtime/interpreter/mterp/x86_64/object.S b/runtime/interpreter/mterp/x86_64/object.S
new file mode 100644
index 0000000..fa85f69
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/object.S
@@ -0,0 +1,254 @@
+%def field(helper=""):
+ /*
+ * General field read / write (iget-* iput-* sget-* sput-*).
+ */
+ .extern $helper
+ REFRESH_INST ${opnum} # fix rINST to include opcode
+ movq rPC, OUT_ARG0 # arg0: Instruction* inst
+ movl rINST, OUT_32_ARG1 # arg1: uint16_t inst_data
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2 # arg2: ShadowFrame* sf
+ movq rSELF, OUT_ARG3 # arg3: Thread* self
+ call SYMBOL($helper)
+ testb %al, %al
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_check_cast():
+/*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ leaq VREG_ADDRESS(rINSTq), OUT_ARG1
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget(is_object="0", helper="MterpIGetU32"):
+% field(helper=helper)
+
+%def op_iget_boolean():
+% op_iget(helper="MterpIGetU8")
+
+%def op_iget_boolean_quick():
+% op_iget_quick(load="movsbl")
+
+%def op_iget_byte():
+% op_iget(helper="MterpIGetI8")
+
+%def op_iget_byte_quick():
+% op_iget_quick(load="movsbl")
+
+%def op_iget_char():
+% op_iget(helper="MterpIGetU16")
+
+%def op_iget_char_quick():
+% op_iget_quick(load="movzwl")
+
+%def op_iget_object():
+% op_iget(is_object="1", helper="MterpIGetObj")
+
+%def op_iget_object_quick():
+ /* For: iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ .extern artIGetObjectFromMterp
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
+ movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
+ EXPORT_PC
+ callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
+ movq rSELF, %rcx
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+ jnz MterpException # bail out
+ andb $$0xf, rINSTbl # rINST <- A
+ SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_quick(load="movl", wide="0"):
+ /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+ /* op vA, vB, offset@CCCC */
+ movl rINST, %ecx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ movzwq 2(rPC), %rax # eax <- field byte offset
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $$0xf,rINSTbl # rINST <- A
+ .if $wide
+ movq (%rcx,%rax,1), %rax
+ SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
+ .else
+ ${load} (%rcx,%rax,1), %eax
+ SET_VREG %eax, rINSTq # fp[A] <- value
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_short():
+% op_iget(helper="MterpIGetI16")
+
+%def op_iget_short_quick():
+% op_iget_quick(load="movswl")
+
+%def op_iget_wide():
+% op_iget(helper="MterpIGetU64")
+
+%def op_iget_wide_quick():
+% op_iget_quick(load="movswl", wide="1")
+
+%def op_instance_of():
+/*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ EXPORT_PC
+ movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
+ movl rINST, %eax # eax <- BA
+ sarl $$4, %eax # eax <- B
+ leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
+ movq OFF_FP_METHOD(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
+ movsbl %al, %eax
+ movq rSELF, %rcx
+ cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+ jnz MterpException
+ andb $$0xf, rINSTbl # rINSTbl <- A
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput(is_object="0", helper="MterpIPutU32"):
+% field(helper=helper)
+
+%def op_iput_boolean():
+% op_iput(helper="MterpIPutU8")
+
+%def op_iput_boolean_quick():
+% op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_byte():
+% op_iput(helper="MterpIPutI8")
+
+%def op_iput_byte_quick():
+% op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_char():
+% op_iput(helper="MterpIPutU16")
+
+%def op_iput_char_quick():
+% op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_object():
+% op_iput(is_object="1", helper="MterpIPutObj")
+
+%def op_iput_object_quick():
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rPC, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movl rINST, OUT_32_ARG2
+ call SYMBOL(MterpIputObjectQuick)
+ testb %al, %al
+ jz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_quick(reg="rINST", store="movl"):
+ /* For: iput-quick, iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx <- BA
+ sarl $$4, %ecx # ecx <- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ andb $$0xf, rINSTbl # rINST <- A
+ GET_VREG rINST, rINSTq # rINST <- v[A]
+ movzwq 2(rPC), %rax # rax <- field byte offset
+ ${store} ${reg}, (%rcx,%rax,1)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_short():
+% op_iput(helper="MterpIPutI16")
+
+%def op_iput_short_quick():
+% op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_wide():
+% op_iput(helper="MterpIPutU64")
+
+%def op_iput_wide_quick():
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ movzbq rINSTbl, %rcx # rcx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG %ecx, %rcx # vB (object we're operating on)
+ testl %ecx, %ecx # is object null?
+ je common_errNullObject
+ movzwq 2(rPC), %rax # rax<- field byte offset
+ leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
+ andb $$0xf, rINSTbl # rINST<- A
+ GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
+ movq %rax, (%rcx) # obj.field<- r0/r1
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_new_instance():
+/*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ EXPORT_PC
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
+ movq rSELF, OUT_ARG1
+ REFRESH_INST ${opnum}
+ movq rINSTq, OUT_ARG2
+ call SYMBOL(MterpNewInstance)
+ testb %al, %al # 0 means an exception is thrown
+ jz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_sget(is_object="0", helper="MterpSGetU32"):
+% field(helper=helper)
+
+%def op_sget_boolean():
+% op_sget(helper="MterpSGetU8")
+
+%def op_sget_byte():
+% op_sget(helper="MterpSGetI8")
+
+%def op_sget_char():
+% op_sget(helper="MterpSGetU16")
+
+%def op_sget_object():
+% op_sget(is_object="1", helper="MterpSGetObj")
+
+%def op_sget_short():
+% op_sget(helper="MterpSGetI16")
+
+%def op_sget_wide():
+% op_sget(helper="MterpSGetU64")
+
+%def op_sput(is_object="0", helper="MterpSPutU32"):
+% field(helper=helper)
+
+%def op_sput_boolean():
+% op_sput(helper="MterpSPutU8")
+
+%def op_sput_byte():
+% op_sput(helper="MterpSPutI8")
+
+%def op_sput_char():
+% op_sput(helper="MterpSPutU16")
+
+%def op_sput_object():
+% op_sput(is_object="1", helper="MterpSPutObj")
+
+%def op_sput_short():
+% op_sput(helper="MterpSPutI16")
+
+%def op_sput_wide():
+% op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double.S b/runtime/interpreter/mterp/x86_64/op_add_double.S
deleted file mode 100644
index cb462cb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
deleted file mode 100644
index 063bde3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float.S b/runtime/interpreter/mterp/x86_64/op_add_float.S
deleted file mode 100644
index 7753bf8..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
deleted file mode 100644
index 6c8005b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"adds","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int.S b/runtime/interpreter/mterp/x86_64/op_add_int.S
deleted file mode 100644
index e316be7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"addl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
deleted file mode 100644
index 2ff8293..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"addl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
deleted file mode 100644
index bfeb7ca..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
deleted file mode 100644
index 8954844..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"addl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long.S b/runtime/interpreter/mterp/x86_64/op_add_long.S
deleted file mode 100644
index 89131ff..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"addq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
deleted file mode 100644
index fed98bc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_add_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"addq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aget.S b/runtime/interpreter/mterp/x86_64/op_aget.S
deleted file mode 100644
index 58d4948..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget.S
+++ /dev/null
@@ -1,24 +0,0 @@
-%default { "load":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
-/*
- * Array get, 32 bits or less. vAA <- vBB[vCC].
- *
- * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # eax <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if $wide
- movq $data_offset(%rax,%rcx,8), %rax
- SET_WIDE_VREG %rax, rINSTq
- .else
- $load $data_offset(%rax,%rcx,$shift), %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S b/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
deleted file mode 100644
index cf7bdb5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movzbl", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_byte.S b/runtime/interpreter/mterp/x86_64/op_aget_byte.S
deleted file mode 100644
index 1cbb569..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movsbl", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_char.S b/runtime/interpreter/mterp/x86_64/op_aget_char.S
deleted file mode 100644
index 45c9085..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movzwl", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_object.S b/runtime/interpreter/mterp/x86_64/op_aget_object.S
deleted file mode 100644
index 5f77a97..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_object.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Array object get. vAA <- vBB[vCC].
- *
- * for: aget-object
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG OUT_32_ARG0, %rax # eax <- vBB (array object)
- GET_VREG OUT_32_ARG1, %rcx # ecx <- vCC (requested index)
- EXPORT_PC
- call SYMBOL(artAGetObjectFromMterp) # (array, index)
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- SET_VREG_OBJECT %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_short.S b/runtime/interpreter/mterp/x86_64/op_aget_short.S
deleted file mode 100644
index 82c4a1d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movswl", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aget_wide.S b/runtime/interpreter/mterp/x86_64/op_aget_wide.S
deleted file mode 100644
index 4f2771b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aget.S" { "load":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int.S b/runtime/interpreter/mterp/x86_64/op_and_int.S
deleted file mode 100644
index 4469889..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"andl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
deleted file mode 100644
index 16315bb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"andl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
deleted file mode 100644
index 63e851b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
deleted file mode 100644
index da7a20f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"andl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long.S b/runtime/interpreter/mterp/x86_64/op_and_long.S
deleted file mode 100644
index ce1dd26..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"andq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
deleted file mode 100644
index d17ab8d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_and_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"andq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_aput.S b/runtime/interpreter/mterp/x86_64/op_aput.S
deleted file mode 100644
index 11500ad..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput.S
+++ /dev/null
@@ -1,23 +0,0 @@
-%default { "reg":"rINST", "store":"movl", "shift":"4", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET", "wide":"0" }
-/*
- * Array put, 32 bits or less. vBB[vCC] <- vAA.
- *
- * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
- *
- */
- /* op vAA, vBB, vCC */
- movzbq 2(rPC), %rax # rax <- BB
- movzbq 3(rPC), %rcx # rcx <- CC
- GET_VREG %eax, %rax # eax <- vBB (array object)
- GET_VREG %ecx, %rcx # ecx <- vCC (requested index)
- testl %eax, %eax # null array object?
- je common_errNullObject # bail if so
- cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
- jae common_errArrayIndex # index >= length, bail.
- .if $wide
- GET_WIDE_VREG rINSTq, rINSTq
- .else
- GET_VREG rINST, rINSTq
- .endif
- $store $reg, $data_offset(%rax,%rcx,$shift)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S b/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
deleted file mode 100644
index 7d77a86..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_byte.S b/runtime/interpreter/mterp/x86_64/op_aput_byte.S
deleted file mode 100644
index 7a1723e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTbl", "store":"movb", "shift":"1", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_char.S b/runtime/interpreter/mterp/x86_64/op_aput_char.S
deleted file mode 100644
index f8f50a3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_object.S b/runtime/interpreter/mterp/x86_64/op_aput_object.S
deleted file mode 100644
index b1bae0f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_object.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Store an object into an array. vBB[vCC] <- vAA.
- */
- /* op vAA, vBB, vCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpAputObject) # (array, index)
- testb %al, %al
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_short.S b/runtime/interpreter/mterp/x86_64/op_aput_short.S
deleted file mode 100644
index 481fd68..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTw", "store":"movw", "shift":"2", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }
diff --git a/runtime/interpreter/mterp/x86_64/op_aput_wide.S b/runtime/interpreter/mterp/x86_64/op_aput_wide.S
deleted file mode 100644
index 5bbd39b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_aput_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_aput.S" { "reg":"rINSTq", "store":"movq", "shift":"8", "data_offset":"MIRROR_WIDE_ARRAY_DATA_OFFSET", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_array_length.S b/runtime/interpreter/mterp/x86_64/op_array_length.S
deleted file mode 100644
index e80d665..0000000
--- a/runtime/interpreter/mterp/x86_64/op_array_length.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Return the length of an array.
- */
- movl rINST, %eax # eax <- BA
- sarl $$4, rINST # rINST <- B
- GET_VREG %ecx, rINSTq # ecx <- vB (object ref)
- testl %ecx, %ecx # is null?
- je common_errNullObject
- andb $$0xf, %al # eax <- A
- movl MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
- SET_VREG rINST, %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_check_cast.S b/runtime/interpreter/mterp/x86_64/op_check_cast.S
deleted file mode 100644
index f8fa7b2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_check_cast.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Check to see if a cast from one class to another is allowed.
- */
- /* check-cast vAA, class@BBBB */
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmp_long.S b/runtime/interpreter/mterp/x86_64/op_cmp_long.S
deleted file mode 100644
index 23ca3e5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmp_long.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
- * register based on the results of the comparison.
- */
- /* cmp-long vAA, vBB, vCC */
- movzbq 2(rPC), %rdx # edx <- BB
- movzbq 3(rPC), %rcx # ecx <- CC
- GET_WIDE_VREG %rdx, %rdx # rdx <- v[BB]
- xorl %eax, %eax
- xorl %edi, %edi
- addb $$1, %al
- movl $$-1, %esi
- cmpq VREG_ADDRESS(%rcx), %rdx
- cmovl %esi, %edi
- cmovg %eax, %edi
- SET_VREG %edi, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S b/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
deleted file mode 100644
index 7c0aa1b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"d","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S b/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
deleted file mode 100644
index 14e8472..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"s","nanval":"pos"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S b/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
deleted file mode 100644
index 1d4c424..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpl_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"d","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S b/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
deleted file mode 100644
index 97a12a6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_cmpl_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcmp.S" {"suff":"s","nanval":"neg"}
diff --git a/runtime/interpreter/mterp/x86_64/op_const.S b/runtime/interpreter/mterp/x86_64/op_const.S
deleted file mode 100644
index 3cfafdb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const vAA, #+BBBBbbbb */
- movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax, rINSTq # vAA<- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_16.S b/runtime/interpreter/mterp/x86_64/op_const_16.S
deleted file mode 100644
index 1a139c6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const/16 vAA, #+BBBB */
- movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_4.S b/runtime/interpreter/mterp/x86_64/op_const_4.S
deleted file mode 100644
index 23c4816..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_4.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* const/4 vA, #+B */
- movsbl rINSTbl, %eax # eax <-ssssssBx
- movl $$0xf, rINST
- andl %eax, rINST # rINST <- A
- sarl $$4, %eax
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_const_class.S b/runtime/interpreter/mterp/x86_64/op_const_class.S
deleted file mode 100644
index 0c402e1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_class.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstClass" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_high16.S b/runtime/interpreter/mterp/x86_64/op_const_high16.S
deleted file mode 100644
index 64e633c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
- /* const/high16 vAA, #+BBBB0000 */
- movzwl 2(rPC), %eax # eax <- 0000BBBB
- sall $$16, %eax # eax <- BBBB0000
- SET_VREG %eax, rINSTq # vAA <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_handle.S b/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
deleted file mode 100644
index 2b8b0a2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_method_handle.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstMethodHandle" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_method_type.S b/runtime/interpreter/mterp/x86_64/op_const_method_type.S
deleted file mode 100644
index 33ce952..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_method_type.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstMethodType" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string.S b/runtime/interpreter/mterp/x86_64/op_const_string.S
deleted file mode 100644
index 5a29bd3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_string.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/const.S" { "helper":"MterpConstString" }
diff --git a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
deleted file mode 100644
index ae03d20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_string_jumbo.S
+++ /dev/null
@@ -1,10 +0,0 @@
- /* const/string vAA, String@BBBBBBBB */
- EXPORT_PC
- movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBB
- movq rINSTq, OUT_ARG1
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide.S b/runtime/interpreter/mterp/x86_64/op_const_wide.S
deleted file mode 100644
index 5615177..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
- movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
deleted file mode 100644
index 593b624..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_16.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const-wide/16 vAA, #+BBBB */
- movswq 2(rPC), %rax # rax <- ssssBBBB
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S b/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
deleted file mode 100644
index 5ef3636..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_32.S
+++ /dev/null
@@ -1,4 +0,0 @@
- /* const-wide/32 vAA, #+BBBBbbbb */
- movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
- SET_WIDE_VREG %rax, rINSTq # store
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S b/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
deleted file mode 100644
index b86b4e5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_const_wide_high16.S
+++ /dev/null
@@ -1,5 +0,0 @@
- /* const-wide/high16 vAA, #+BBBB000000000000 */
- movzwq 2(rPC), %rax # eax <- 0000BBBB
- salq $$48, %rax # eax <- BBBB0000
- SET_WIDE_VREG %rax, rINSTq # v[AA+0] <- eax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double.S b/runtime/interpreter/mterp/x86_64/op_div_double.S
deleted file mode 100644
index 45c700c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
deleted file mode 100644
index 83f270e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float.S b/runtime/interpreter/mterp/x86_64/op_div_float.S
deleted file mode 100644
index aa90b24..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
deleted file mode 100644
index f0f8f1a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"divs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int.S b/runtime/interpreter/mterp/x86_64/op_div_int.S
deleted file mode 100644
index bba5a17..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
deleted file mode 100644
index fa4255d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%eax","second":"%ecx","wide":"0","suffix":"l"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
deleted file mode 100644
index 3fa1e09..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit16.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
deleted file mode 100644
index 859883e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit8.S" {"result":"%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long.S b/runtime/interpreter/mterp/x86_64/op_div_long.S
deleted file mode 100644
index a061a88..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
deleted file mode 100644
index 8886e68..0000000
--- a/runtime/interpreter/mterp/x86_64/op_div_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%rax","second":"%rcx","wide":"1","suffix":"q","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_float.S b/runtime/interpreter/mterp/x86_64/op_double_to_float.S
deleted file mode 100644
index cea1482..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"d","dest_suffix":"s","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_int.S b/runtime/interpreter/mterp/x86_64/op_double_to_int.S
deleted file mode 100644
index a9965ed..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_double_to_long.S b/runtime/interpreter/mterp/x86_64/op_double_to_long.S
deleted file mode 100644
index 179e6a1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_double_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"d","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S b/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
deleted file mode 100644
index 7ea36a6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_fill_array_data.S
+++ /dev/null
@@ -1,9 +0,0 @@
- /* fill-array-data vAA, +BBBBBBBB */
- EXPORT_PC
- movslq 2(rPC), %rcx # rcx <- ssssssssBBBBbbbb
- leaq (rPC,%rcx,2), OUT_ARG1 # OUT_ARG1 <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG0, rINSTq # OUT_ARG0 <- vAA (array object)
- call SYMBOL(MterpFillArrayData) # (obj, payload)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
deleted file mode 100644
index a7f7ddc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_filled_new_array.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"MterpFilledNewArray" }
-/*
- * Create a new array with elements filled from registers.
- *
- * for: filled-new-array, filled-new-array/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
- .extern $helper
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- movq rSELF, OUT_ARG2
- call SYMBOL($helper)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S b/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
deleted file mode 100644
index 4ca79a3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_filled_new_array_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_double.S b/runtime/interpreter/mterp/x86_64/op_float_to_double.S
deleted file mode 100644
index 7855205..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"s","dest_suffix":"d","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_int.S b/runtime/interpreter/mterp/x86_64/op_float_to_int.S
deleted file mode 100644
index cb90555..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"l","max_const":"$0x7fffffff","result_reg":"%eax","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_float_to_long.S b/runtime/interpreter/mterp/x86_64/op_float_to_long.S
deleted file mode 100644
index 96bb4ee..0000000
--- a/runtime/interpreter/mterp/x86_64/op_float_to_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/cvtfp_int.S" {"fp_suffix":"s","i_suffix":"q","max_const":"$0x7fffffffffffffff","result_reg":"%rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S
deleted file mode 100644
index 9749901..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 8-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto +AA */
- movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S
deleted file mode 100644
index 77688e0..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto_16.S
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Unconditional branch, 16-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- */
- /* goto/16 +AAAA */
- movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S
deleted file mode 100644
index 29d777b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_goto_32.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Unconditional branch, 32-bit offset.
- *
- * The branch distance is a signed code-unit offset, which we need to
- * double to get a byte offset.
- *
- * Because we need the SF bit set, we'll use an adds
- * to convert from Dalvik offset to byte offset.
- */
- /* goto/32 +AAAAAAAA */
- movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eq.S b/runtime/interpreter/mterp/x86_64/op_if_eq.S
deleted file mode 100644
index d56ce72..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_eq.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_eqz.S b/runtime/interpreter/mterp/x86_64/op_if_eqz.S
deleted file mode 100644
index a0fc444..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_eqz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"ne" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ge.S b/runtime/interpreter/mterp/x86_64/op_if_ge.S
deleted file mode 100644
index a7832ef..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ge.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gez.S b/runtime/interpreter/mterp/x86_64/op_if_gez.S
deleted file mode 100644
index f9af5db..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"l" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gt.S b/runtime/interpreter/mterp/x86_64/op_if_gt.S
deleted file mode 100644
index 70f2b9e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_gtz.S b/runtime/interpreter/mterp/x86_64/op_if_gtz.S
deleted file mode 100644
index 2fb0d50..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_gtz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"le" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_le.S b/runtime/interpreter/mterp/x86_64/op_if_le.S
deleted file mode 100644
index 321962a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_le.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lez.S b/runtime/interpreter/mterp/x86_64/op_if_lez.S
deleted file mode 100644
index d3dc334..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_lez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"g" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_lt.S b/runtime/interpreter/mterp/x86_64/op_if_lt.S
deleted file mode 100644
index f028005..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_lt.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ltz.S b/runtime/interpreter/mterp/x86_64/op_if_ltz.S
deleted file mode 100644
index 383d73a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ltz.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"ge" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_ne.S b/runtime/interpreter/mterp/x86_64/op_if_ne.S
deleted file mode 100644
index ac6e063..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_ne.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bincmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_if_nez.S b/runtime/interpreter/mterp/x86_64/op_if_nez.S
deleted file mode 100644
index c96e4f3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_if_nez.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/zcmp.S" { "revcmp":"e" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget.S b/runtime/interpreter/mterp/x86_64/op_iget.S
deleted file mode 100644
index 5c6cab6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget.S
+++ /dev/null
@@ -1,28 +0,0 @@
-%default { "is_object":"0", "helper":"MterpIGetU32", "wide":"0"}
-/*
- * General instance field get.
- *
- * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short, iget-wide
- */
- EXPORT_PC
- movzbq rINSTbl, %rcx # rcx <- BA
- movzwl 2(rPC), OUT_32_ARG0 # eax <- field ref CCCC
- sarl $$4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3
- call SYMBOL($helper)
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $$0xf, rINSTbl # rINST <- A
- .if $is_object
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <-value
- .else
- .if $wide
- SET_WIDE_VREG %rax, rINSTq # fp[A] <-value
- .else
- SET_VREG %eax, rINSTq # fp[A] <-value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
deleted file mode 100644
index 18e9264..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
deleted file mode 100644
index 07139c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte.S b/runtime/interpreter/mterp/x86_64/op_iget_byte.S
deleted file mode 100644
index bec0ad5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
deleted file mode 100644
index 07139c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movsbl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char.S b/runtime/interpreter/mterp/x86_64/op_iget_char.S
deleted file mode 100644
index 5e22b88..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
deleted file mode 100644
index 8cb3be3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movzwl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object.S b/runtime/interpreter/mterp/x86_64/op_iget_object.S
deleted file mode 100644
index bcef1d2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "is_object":"1", "helper":"MterpIGetObj" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
deleted file mode 100644
index 176c954..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_object_quick.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* For: iget-object-quick */
- /* op vA, vB, offset@CCCC */
- .extern artIGetObjectFromMterp
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG0, %rcx # vB (object we're operating on)
- movzwl 2(rPC), OUT_32_ARG1 # eax <- field byte offset
- EXPORT_PC
- callq SYMBOL(artIGetObjectFromMterp) # (obj, offset)
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException # bail out
- andb $$0xf, rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_quick.S
deleted file mode 100644
index bfb7530..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_quick.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "load":"movl", "wide":"0"}
- /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
- /* op vA, vB, offset@CCCC */
- movl rINST, %ecx # rcx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- movzwq 2(rPC), %rax # eax <- field byte offset
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $$0xf,rINSTbl # rINST <- A
- .if $wide
- movq (%rcx,%rax,1), %rax
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- ${load} (%rcx,%rax,1), %eax
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short.S b/runtime/interpreter/mterp/x86_64/op_iget_short.S
deleted file mode 100644
index 14c49f7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
deleted file mode 100644
index 56ca858..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movswl" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide.S b/runtime/interpreter/mterp/x86_64/op_iget_wide.S
deleted file mode 100644
index d9d1744..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget.S" { "helper":"MterpIGetU64", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
deleted file mode 100644
index 169d625..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iget_wide_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iget_quick.S" { "load":"movswl", "wide":"1" }
diff --git a/runtime/interpreter/mterp/x86_64/op_instance_of.S b/runtime/interpreter/mterp/x86_64/op_instance_of.S
deleted file mode 100644
index 4819833..0000000
--- a/runtime/interpreter/mterp/x86_64/op_instance_of.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Check to see if an object reference is an instance of a class.
- *
- * Most common situation is a non-null object, being compared against
- * an already-resolved class.
- */
- /* instance-of vA, vB, class@CCCC */
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- CCCC
- movl rINST, %eax # eax <- BA
- sarl $$4, %eax # eax <- B
- leaq VREG_ADDRESS(%rax), OUT_ARG1 # Get object address
- movq OFF_FP_METHOD(rFP), OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
- movsbl %al, %eax
- movq rSELF, %rcx
- cmpq $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- andb $$0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax, rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S b/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
deleted file mode 100644
index f4e578f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movsbl %al, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_char.S b/runtime/interpreter/mterp/x86_64/op_int_to_char.S
deleted file mode 100644
index c1bf17f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movzwl %ax,%eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_double.S b/runtime/interpreter/mterp/x86_64/op_int_to_double.S
deleted file mode 100644
index 27ebf42..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dl","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_float.S b/runtime/interpreter/mterp/x86_64/op_int_to_float.S
deleted file mode 100644
index 5a98d44..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sl","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_long.S b/runtime/interpreter/mterp/x86_64/op_int_to_long.S
deleted file mode 100644
index 9281137..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_long.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* int to long vA, vB */
- movzbq rINSTbl, %rax # rax <- +A
- sarl $$4, %eax # eax <- B
- andb $$0xf, rINSTbl # rINST <- A
- movslq VREG_ADDRESS(%rax), %rax
- SET_WIDE_VREG %rax, rINSTq # v[A] <- %rax
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
-
diff --git a/runtime/interpreter/mterp/x86_64/op_int_to_short.S b/runtime/interpreter/mterp/x86_64/op_int_to_short.S
deleted file mode 100644
index 6ae6b50..0000000
--- a/runtime/interpreter/mterp/x86_64/op_int_to_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":"movswl %ax, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
deleted file mode 100644
index f4011f6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_custom.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeCustom" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
deleted file mode 100644
index 94612c4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_custom_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeCustomRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
deleted file mode 100644
index 9628589..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_direct.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeDirect" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
deleted file mode 100644
index 09ac881..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_direct_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeDirectRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
deleted file mode 100644
index 76d9cd4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_interface.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeInterface" }
-/*
- * Handle an interface method call.
- *
- * for: invoke-interface, invoke-interface/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
deleted file mode 100644
index 785b43c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_interface_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeInterfaceRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
deleted file mode 100644
index 4529445..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphic" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
deleted file mode 100644
index 01981c1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_polymorphic_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke_polymorphic.S" { "helper":"MterpInvokePolymorphicRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static.S b/runtime/interpreter/mterp/x86_64/op_invoke_static.S
deleted file mode 100644
index dd8027d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_static.S
+++ /dev/null
@@ -1,2 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeStatic" }
-
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
deleted file mode 100644
index ee26074..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_static_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeStaticRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super.S b/runtime/interpreter/mterp/x86_64/op_invoke_super.S
deleted file mode 100644
index d07f8d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_super.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeSuper" }
-/*
- * Handle a "super" method call.
- *
- * for: invoke-super, invoke-super/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
deleted file mode 100644
index 7245cfd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_super_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeSuperRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
deleted file mode 100644
index 19c708b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual.S
+++ /dev/null
@@ -1,8 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtual" }
-/*
- * Handle a virtual method call.
- *
- * for: invoke-virtual, invoke-virtual/range
- */
- /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
- /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
deleted file mode 100644
index 313bd05..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuick" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
deleted file mode 100644
index 424ad32..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
deleted file mode 100644
index 556f718..0000000
--- a/runtime/interpreter/mterp/x86_64/op_invoke_virtual_range_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput.S b/runtime/interpreter/mterp/x86_64/op_iput.S
deleted file mode 100644
index 12affdb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput.S
+++ /dev/null
@@ -1,20 +0,0 @@
-%default { "helper":"MterpIPutU32"}
-/*
- * General 32-bit instance field put.
- *
- * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
- */
- /* op vA, vB, field@CCCC */
- .extern $helper
- EXPORT_PC
- movzwl 2(rPC), OUT_32_ARG0 # field ref <- 0000CCCC
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $$4, %ecx # ecx<- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $$0xf, rINSTbl # rINST<- A
- GET_VREG OUT_32_ARG2, rINSTq # fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL($helper)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
deleted file mode 100644
index 06bbd70..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutU8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
deleted file mode 100644
index 6bd060e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_boolean_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte.S b/runtime/interpreter/mterp/x86_64/op_iput_byte.S
deleted file mode 100644
index 53f9008..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutI8" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
deleted file mode 100644
index 6bd060e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_byte_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTbl", "store":"movb" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char.S b/runtime/interpreter/mterp/x86_64/op_iput_char.S
deleted file mode 100644
index 4736f5e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutU16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
deleted file mode 100644
index 3da96d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_char_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object.S b/runtime/interpreter/mterp/x86_64/op_iput_object.S
deleted file mode 100644
index 22648cd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_object.S
+++ /dev/null
@@ -1,10 +0,0 @@
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movl rINST, OUT_32_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpIPutObj)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
deleted file mode 100644
index b5b128a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_object_quick.S
+++ /dev/null
@@ -1,9 +0,0 @@
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movl rINST, OUT_32_ARG2
- call SYMBOL(MterpIputObjectQuick)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_quick.S
deleted file mode 100644
index ecaf98e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_quick.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "reg":"rINST", "store":"movl" }
- /* For: iput-quick, iput-object-quick */
- /* op vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- andb $$0xf, rINSTbl # rINST <- A
- GET_VREG rINST, rINSTq # rINST <- v[A]
- movzwq 2(rPC), %rax # rax <- field byte offset
- ${store} ${reg}, (%rcx,%rax,1)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short.S b/runtime/interpreter/mterp/x86_64/op_iput_short.S
deleted file mode 100644
index dca5735..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput.S" { "helper":"MterpIPutI16" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
deleted file mode 100644
index 3da96d5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_short_quick.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_iput_quick.S" { "reg":"rINSTw", "store":"movw" }
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide.S b/runtime/interpreter/mterp/x86_64/op_iput_wide.S
deleted file mode 100644
index 4f8c47c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_wide.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* iput-wide vA, vB, field@CCCC */
- .extern MterpIPutU64
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movzbq rINSTbl, %rcx # rcx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG OUT_32_ARG1, %rcx # the object pointer
- andb $$0xf, rINSTbl # rINST <- A
- leaq VREG_ADDRESS(rINSTq), OUT_ARG2 # &fp[A]
- movq OFF_FP_METHOD(rFP), OUT_ARG3 # referrer
- call SYMBOL(MterpIPutU64)
- testb %al, %al
- jnz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
deleted file mode 100644
index 473189d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_iput_wide_quick.S
+++ /dev/null
@@ -1,12 +0,0 @@
- /* iput-wide-quick vA, vB, offset@CCCC */
- movzbq rINSTbl, %rcx # rcx<- BA
- sarl $$4, %ecx # ecx<- B
- GET_VREG %ecx, %rcx # vB (object we're operating on)
- testl %ecx, %ecx # is object null?
- je common_errNullObject
- movzwq 2(rPC), %rax # rax<- field byte offset
- leaq (%rcx,%rax,1), %rcx # ecx<- Address of 64-bit target
- andb $$0xf, rINSTbl # rINST<- A
- GET_WIDE_VREG %rax, rINSTq # rax<- fp[A]/fp[A+1]
- movq %rax, (%rcx) # obj.field<- r0/r1
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_double.S b/runtime/interpreter/mterp/x86_64/op_long_to_double.S
deleted file mode 100644
index 7cdae32..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"dq","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_float.S b/runtime/interpreter/mterp/x86_64/op_long_to_float.S
deleted file mode 100644
index 7553348..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/fpcvt.S" {"source_suffix":"i","dest_suffix":"sq","wide":"0"}
diff --git a/runtime/interpreter/mterp/x86_64/op_long_to_int.S b/runtime/interpreter/mterp/x86_64/op_long_to_int.S
deleted file mode 100644
index 7b50c8e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_long_to_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%include "x86_64/op_move.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S b/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
deleted file mode 100644
index 411091f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_monitor_enter.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Synchronize on an object.
- */
- /* monitor-enter vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artLockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S b/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
deleted file mode 100644
index 72d9a23..0000000
--- a/runtime/interpreter/mterp/x86_64/op_monitor_exit.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Unlock an object.
- *
- * Exceptions that occur when unlocking a monitor need to appear as
- * if they happened at the following instruction. See the Dalvik
- * instruction spec.
- */
- /* monitor-exit vAA */
- EXPORT_PC
- GET_VREG OUT_32_ARG0, rINSTq
- movq rSELF, OUT_ARG1
- call SYMBOL(artUnlockObjectFromCode) # (object, self)
- testq %rax, %rax
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move.S b/runtime/interpreter/mterp/x86_64/op_move.S
deleted file mode 100644
index ccaac2c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move.S
+++ /dev/null
@@ -1,13 +0,0 @@
-%default { "is_object":"0" }
- /* for move, move-object, long-to-int */
- /* op vA, vB */
- movl rINST, %eax # eax <- BA
- andb $$0xf, %al # eax <- A
- shrl $$4, rINST # rINST <- B
- GET_VREG %edx, rINSTq
- .if $is_object
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_16.S b/runtime/interpreter/mterp/x86_64/op_move_16.S
deleted file mode 100644
index 6a813eb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_16.S
+++ /dev/null
@@ -1,12 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/16, move-object/16 */
- /* op vAAAA, vBBBB */
- movzwq 4(rPC), %rcx # ecx <- BBBB
- movzwq 2(rPC), %rax # eax <- AAAA
- GET_VREG %edx, %rcx
- .if $is_object
- SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
- .else
- SET_VREG %edx, %rax # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_exception.S b/runtime/interpreter/mterp/x86_64/op_move_exception.S
deleted file mode 100644
index 33db878..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_exception.S
+++ /dev/null
@@ -1,6 +0,0 @@
- /* move-exception vAA */
- movq rSELF, %rcx
- movl THREAD_EXCEPTION_OFFSET(%rcx), %eax
- SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
- movl $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_from16.S b/runtime/interpreter/mterp/x86_64/op_move_from16.S
deleted file mode 100644
index 150e9c2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_from16.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
- /* for: move/from16, move-object/from16 */
- /* op vAA, vBBBB */
- movzwq 2(rPC), %rax # eax <- BBBB
- GET_VREG %edx, %rax # edx <- fp[BBBB]
- .if $is_object
- SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %edx, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object.S b/runtime/interpreter/mterp/x86_64/op_move_object.S
deleted file mode 100644
index 0d86649..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_16.S b/runtime/interpreter/mterp/x86_64/op_move_object_16.S
deleted file mode 100644
index 32541ff..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object_16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S b/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
deleted file mode 100644
index 983e4ab..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_object_from16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_from16.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result.S b/runtime/interpreter/mterp/x86_64/op_move_result.S
deleted file mode 100644
index 8268344..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result.S
+++ /dev/null
@@ -1,11 +0,0 @@
-%default { "is_object":"0" }
- /* for: move-result, move-result-object */
- /* op vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movl (%rax), %eax # r0 <- result.i.
- .if $is_object
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- fp[B]
- .else
- SET_VREG %eax, rINSTq # fp[A] <- fp[B]
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_object.S b/runtime/interpreter/mterp/x86_64/op_move_result_object.S
deleted file mode 100644
index c5aac17..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_move_result.S" {"is_object":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S b/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
deleted file mode 100644
index 03de783..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_result_wide.S
+++ /dev/null
@@ -1,5 +0,0 @@
- /* move-result-wide vAA */
- movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
- movq (%rax), %rdx # Get wide
- SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide.S b/runtime/interpreter/mterp/x86_64/op_move_wide.S
deleted file mode 100644
index 508f8cc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* move-wide vA, vB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movl rINST, %ecx # ecx <- BA
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
deleted file mode 100644
index ce371a9..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide_16.S
+++ /dev/null
@@ -1,7 +0,0 @@
- /* move-wide/16 vAAAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwq 4(rPC), %rcx # ecx<- BBBB
- movzwq 2(rPC), %rax # eax<- AAAA
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S b/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
deleted file mode 100644
index 0d6971a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_move_wide_from16.S
+++ /dev/null
@@ -1,6 +0,0 @@
- /* move-wide/from16 vAA, vBBBB */
- /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
- movzwl 2(rPC), %ecx # ecx <- BBBB
- GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
- SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double.S b/runtime/interpreter/mterp/x86_64/op_mul_double.S
deleted file mode 100644
index 1f4bcb3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
deleted file mode 100644
index 9850a28..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float.S b/runtime/interpreter/mterp/x86_64/op_mul_float.S
deleted file mode 100644
index 85960e9..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
deleted file mode 100644
index 6d36b6a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"muls","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int.S b/runtime/interpreter/mterp/x86_64/op_mul_int.S
deleted file mode 100644
index 5f3923a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"imull (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
deleted file mode 100644
index 0b5af8a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_2addr.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_VREG %eax, %rcx # eax <- vA
- imull (rFP,rINSTq,4), %eax
- SET_VREG %eax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
deleted file mode 100644
index a4cfdbc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"imull %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
deleted file mode 100644
index 89e9acb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"imull %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long.S b/runtime/interpreter/mterp/x86_64/op_mul_long.S
deleted file mode 100644
index 2b85370..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"imulq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
deleted file mode 100644
index 167128b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_mul_long_2addr.S
+++ /dev/null
@@ -1,8 +0,0 @@
- /* mul vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4, rINST # rINST <- B
- andb $$0xf, %cl # ecx <- A
- GET_WIDE_VREG %rax, %rcx # rax <- vA
- imulq (rFP,rINSTq,4), %rax
- SET_WIDE_VREG %rax, %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_double.S b/runtime/interpreter/mterp/x86_64/op_neg_double.S
deleted file mode 100644
index 2c14b09..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"preinstr":" movq $0x8000000000000000, %rsi", "instr":" xorq %rsi, %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_float.S b/runtime/interpreter/mterp/x86_64/op_neg_float.S
deleted file mode 100644
index 148b21e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":" xorl $0x80000000, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_int.S b/runtime/interpreter/mterp/x86_64/op_neg_int.S
deleted file mode 100644
index f90a937..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":" negl %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_neg_long.S b/runtime/interpreter/mterp/x86_64/op_neg_long.S
deleted file mode 100644
index 18fc3cc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_neg_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":" negq %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_new_array.S b/runtime/interpreter/mterp/x86_64/op_new_array.S
deleted file mode 100644
index 9831a0b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_new_array.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Allocate an array of objects, specified with the array class
- * and a count.
- *
- * The verifier guarantees that this is an array class, so we don't
- * check for it here.
- */
- /* new-array vA, vB, class@CCCC */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpNewArray)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_new_instance.S b/runtime/interpreter/mterp/x86_64/op_new_instance.S
deleted file mode 100644
index fc8c8cd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_new_instance.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Create a new instance of a class.
- */
- /* new-instance vAA, class@BBBB */
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rSELF, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- call SYMBOL(MterpNewInstance)
- testb %al, %al # 0 means an exception is thrown
- jz MterpPossibleException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_nop.S b/runtime/interpreter/mterp/x86_64/op_nop.S
deleted file mode 100644
index 4cb68e3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_nop.S
+++ /dev/null
@@ -1 +0,0 @@
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_not_int.S b/runtime/interpreter/mterp/x86_64/op_not_int.S
deleted file mode 100644
index 463d080..0000000
--- a/runtime/interpreter/mterp/x86_64/op_not_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":" notl %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_not_long.S b/runtime/interpreter/mterp/x86_64/op_not_long.S
deleted file mode 100644
index c97bb9e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_not_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unop.S" {"instr":" notq %rax", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int.S b/runtime/interpreter/mterp/x86_64/op_or_int.S
deleted file mode 100644
index 730310f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"orl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
deleted file mode 100644
index f722e4d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"orl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
deleted file mode 100644
index fee86c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
deleted file mode 100644
index 81104c7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"orl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long.S b/runtime/interpreter/mterp/x86_64/op_or_long.S
deleted file mode 100644
index 6c70a20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"orq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
deleted file mode 100644
index 546da1d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_or_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"orq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S
deleted file mode 100644
index 148552f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_packed_switch.S
+++ /dev/null
@@ -1,18 +0,0 @@
-%default { "func":"MterpDoPackedSwitch" }
-/*
- * Handle a packed-switch or sparse-switch instruction. In both cases
- * we decode it and hand it off to a helper function.
- *
- * We don't really expect backward branches in a switch statement, but
- * they're perfectly legal, so we check for them here.
- *
- * for: packed-switch, sparse-switch
- */
- /* op vAA, +BBBB */
- movslq 2(rPC), OUT_ARG0 # rcx <- ssssssssBBBBbbbb
- leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + ssssssssBBBBbbbb*2
- GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA
- call SYMBOL($func)
- testl %eax, %eax
- movslq %eax, rINSTq
- jmp MterpCommonTakenBranch
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double.S b/runtime/interpreter/mterp/x86_64/op_rem_double.S
deleted file mode 100644
index 00aed78..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_double.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* rem_double vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- BB
- movzbq 2(rPC), %rax # eax <- CC
- fldl VREG_ADDRESS(%rcx) # %st1 <- fp[vBB]
- fldl VREG_ADDRESS(%rax) # %st0 <- fp[vCC]
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(rINSTq) # fp[vAA] <- %st
- CLEAR_WIDE_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
deleted file mode 100644
index 9768266..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_double_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* rem_double/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- fldl VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $$0xf, %cl # ecx <- A
- fldl VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstpl VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_WIDE_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float.S b/runtime/interpreter/mterp/x86_64/op_rem_float.S
deleted file mode 100644
index 5af28ac..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_float.S
+++ /dev/null
@@ -1,14 +0,0 @@
- /* rem_float vAA, vBB, vCC */
- movzbq 3(rPC), %rcx # ecx <- BB
- movzbq 2(rPC), %rax # eax <- CC
- flds VREG_ADDRESS(%rcx) # vBB to fp stack
- flds VREG_ADDRESS(%rax) # vCC to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(rINSTq) # %st to vAA
- CLEAR_REF rINSTq
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
deleted file mode 100644
index e9282a8..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_float_2addr.S
+++ /dev/null
@@ -1,15 +0,0 @@
- /* rem_float/2addr vA, vB */
- movzbq rINSTbl, %rcx # ecx <- A+
- sarl $$4, rINST # rINST <- B
- flds VREG_ADDRESS(rINSTq) # vB to fp stack
- andb $$0xf, %cl # ecx <- A
- flds VREG_ADDRESS(%rcx) # vA to fp stack
-1:
- fprem
- fstsw %ax
- sahf
- jp 1b
- fstp %st(1)
- fstps VREG_ADDRESS(%rcx) # %st to vA
- CLEAR_REF %rcx
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int.S b/runtime/interpreter/mterp/x86_64/op_rem_int.S
deleted file mode 100644
index fd77d7c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
deleted file mode 100644
index 25ffbf7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%edx","second":"%ecx","wide":"0","suffix":"l","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
deleted file mode 100644
index 21cc370..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit16.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
deleted file mode 100644
index 2eb0150..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindivLit8.S" {"result":"%edx","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long.S b/runtime/interpreter/mterp/x86_64/op_rem_long.S
deleted file mode 100644
index efa7215..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","ext":"cqo","rem":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
deleted file mode 100644
index ce0dd86..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rem_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/bindiv2addr.S" {"result":"%rdx","second":"%rcx","wide":"1","suffix":"q","rem":"1","ext":"cqo"}
diff --git a/runtime/interpreter/mterp/x86_64/op_return.S b/runtime/interpreter/mterp/x86_64/op_return.S
deleted file mode 100644
index 8cb6cba..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Return a 32-bit value.
- *
- * for: return, return-object
- */
- /* op vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_VREG %eax, rINSTq # eax <- vAA
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_object.S b/runtime/interpreter/mterp/x86_64/op_return_object.S
deleted file mode 100644
index 1ae69a5..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_return.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void.S b/runtime/interpreter/mterp/x86_64/op_return_void.S
deleted file mode 100644
index ba68e7e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_void.S
+++ /dev/null
@@ -1,9 +0,0 @@
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
deleted file mode 100644
index 6799da1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_void_no_barrier.S
+++ /dev/null
@@ -1,7 +0,0 @@
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- xorq %rax, %rax
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_return_wide.S b/runtime/interpreter/mterp/x86_64/op_return_wide.S
deleted file mode 100644
index d6d6d1b..0000000
--- a/runtime/interpreter/mterp/x86_64/op_return_wide.S
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Return a 64-bit value.
- */
- /* return-wide vAA */
- .extern MterpThreadFenceForConstructor
- call SYMBOL(MterpThreadFenceForConstructor)
- movq rSELF, OUT_ARG0
- testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(OUT_ARG0)
- jz 1f
- call SYMBOL(MterpSuspendCheck)
-1:
- GET_WIDE_VREG %rax, rINSTq # eax <- v[AA]
- jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int.S b/runtime/interpreter/mterp/x86_64/op_rsub_int.S
deleted file mode 100644
index 2dd2002..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rsub_int.S
+++ /dev/null
@@ -1,2 +0,0 @@
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%include "x86_64/binopLit16.S" {"instr":"subl %eax, %ecx","result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
deleted file mode 100644
index 64d0d8a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_rsub_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"subl %eax, %ecx" , "result":"%ecx"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget.S b/runtime/interpreter/mterp/x86_64/op_sget.S
deleted file mode 100644
index c15ac1e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget.S
+++ /dev/null
@@ -1,26 +0,0 @@
-%default { "is_object":"0", "helper":"MterpSGetU32", "wide":"0" }
-/*
- * General SGET handler wrapper.
- *
- * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short, sget-wide
- */
- /* op vAA, field@BBBB */
- .extern $helper
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref CCCC
- movq OFF_FP_METHOD(rFP), OUT_ARG1 # referrer
- movq rSELF, OUT_ARG2 # self
- call SYMBOL($helper)
- movq rSELF, %rcx
- cmpl $$0, THREAD_EXCEPTION_OFFSET(%rcx)
- jnz MterpException
- .if $is_object
- SET_VREG_OBJECT %eax, rINSTq # fp[A] <- value
- .else
- .if $wide
- SET_WIDE_VREG %rax, rINSTq # fp[A] <- value
- .else
- SET_VREG %eax, rINSTq # fp[A] <- value
- .endif
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S b/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
deleted file mode 100644
index e5a4e41..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_byte.S b/runtime/interpreter/mterp/x86_64/op_sget_byte.S
deleted file mode 100644
index 4602f7d..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_char.S b/runtime/interpreter/mterp/x86_64/op_sget_char.S
deleted file mode 100644
index a094a54..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_object.S b/runtime/interpreter/mterp/x86_64/op_sget_object.S
deleted file mode 100644
index 94597b1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_object.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"is_object":"1", "helper":"MterpSGetObj"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_short.S b/runtime/interpreter/mterp/x86_64/op_sget_short.S
deleted file mode 100644
index dee5c24..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sget_wide.S b/runtime/interpreter/mterp/x86_64/op_sget_wide.S
deleted file mode 100644
index 65ddb8a..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sget_wide.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sget.S" {"helper":"MterpSGetU64", "wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int.S b/runtime/interpreter/mterp/x86_64/op_shl_int.S
deleted file mode 100644
index fa1edb7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
deleted file mode 100644
index dd96279..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
deleted file mode 100644
index 39b23ae..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"sall %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long.S b/runtime/interpreter/mterp/x86_64/op_shl_long.S
deleted file mode 100644
index fdc7cb6..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"salq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
deleted file mode 100644
index 546633f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shl_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"salq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int.S b/runtime/interpreter/mterp/x86_64/op_shr_int.S
deleted file mode 100644
index fc289f4..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
deleted file mode 100644
index 0e5bca7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
deleted file mode 100644
index 3cc9307..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"sarl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long.S b/runtime/interpreter/mterp/x86_64/op_shr_long.S
deleted file mode 100644
index 25028d3..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"sarq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
deleted file mode 100644
index 3738413..0000000
--- a/runtime/interpreter/mterp/x86_64/op_shr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"sarq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S b/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
deleted file mode 100644
index 0eaa514..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sparse_switch.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" }
diff --git a/runtime/interpreter/mterp/x86_64/op_sput.S b/runtime/interpreter/mterp/x86_64/op_sput.S
deleted file mode 100644
index 9a33d52..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput.S
+++ /dev/null
@@ -1,17 +0,0 @@
-%default { "helper":"MterpSPutU32"}
-/*
- * General SPUT handler wrapper.
- *
- * for: sput, sput-boolean, sput-byte, sput-char, sput-short
- */
- /* op vAA, field@BBBB */
- .extern $helper
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- GET_VREG OUT_32_ARG1, rINSTq # fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL($helper)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S b/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
deleted file mode 100644
index ea9acbf..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_boolean.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutU8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_byte.S b/runtime/interpreter/mterp/x86_64/op_sput_byte.S
deleted file mode 100644
index 62c9e20..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_byte.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutI8"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_char.S b/runtime/interpreter/mterp/x86_64/op_sput_char.S
deleted file mode 100644
index ab0196e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_char.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutU16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_object.S b/runtime/interpreter/mterp/x86_64/op_sput_object.S
deleted file mode 100644
index 8a47074..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_object.S
+++ /dev/null
@@ -1,10 +0,0 @@
- EXPORT_PC
- leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG0
- movq rPC, OUT_ARG1
- REFRESH_INST ${opnum}
- movq rINSTq, OUT_ARG2
- movq rSELF, OUT_ARG3
- call SYMBOL(MterpSPutObj)
- testb %al, %al
- jz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_short.S b/runtime/interpreter/mterp/x86_64/op_sput_short.S
deleted file mode 100644
index f73a3fc..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_short.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/op_sput.S" {"helper":"MterpSPutI16"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sput_wide.S b/runtime/interpreter/mterp/x86_64/op_sput_wide.S
deleted file mode 100644
index 464d169..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sput_wide.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPUT_WIDE handler wrapper.
- *
- */
- /* sput-wide vAA, field@BBBB */
- .extern MterpSPutU64
- EXPORT_PC
- movzwq 2(rPC), OUT_ARG0 # field ref BBBB
- leaq VREG_ADDRESS(rINSTq), OUT_ARG1 # &fp[AA]
- movq OFF_FP_METHOD(rFP), OUT_ARG2 # referrer
- movq rSELF, OUT_ARG3 # self
- call SYMBOL(MterpSPutU64)
- testb %al, %al
- jnz MterpException
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double.S b/runtime/interpreter/mterp/x86_64/op_sub_double.S
deleted file mode 100644
index 952667e..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_double.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
deleted file mode 100644
index 0bd5dbb..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_double_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"d"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float.S b/runtime/interpreter/mterp/x86_64/op_sub_float.S
deleted file mode 100644
index ea0ae14..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_float.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
deleted file mode 100644
index 9dd1780..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_float_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/sseBinop2Addr.S" {"instr":"subs","suff":"s"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int.S b/runtime/interpreter/mterp/x86_64/op_sub_int.S
deleted file mode 100644
index 560394f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"subl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
deleted file mode 100644
index 6f50f78..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"subl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long.S b/runtime/interpreter/mterp/x86_64/op_sub_long.S
deleted file mode 100644
index 7fa54e7..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"subq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
deleted file mode 100644
index c18be10..0000000
--- a/runtime/interpreter/mterp/x86_64/op_sub_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"subq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_throw.S b/runtime/interpreter/mterp/x86_64/op_throw.S
deleted file mode 100644
index 8095c25..0000000
--- a/runtime/interpreter/mterp/x86_64/op_throw.S
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Throw an exception object in the current thread.
- */
- /* throw vAA */
- EXPORT_PC
- GET_VREG %eax, rINSTq # eax<- vAA (exception object)
- testb %al, %al
- jz common_errNullObject
- movq rSELF, %rcx
- movq %rax, THREAD_EXCEPTION_OFFSET(%rcx)
- jmp MterpException
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3e.S b/runtime/interpreter/mterp/x86_64/op_unused_3e.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_3e.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_3f.S b/runtime/interpreter/mterp/x86_64/op_unused_3f.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_3f.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_40.S b/runtime/interpreter/mterp/x86_64/op_unused_40.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_40.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_41.S b/runtime/interpreter/mterp/x86_64/op_unused_41.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_41.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_42.S b/runtime/interpreter/mterp/x86_64/op_unused_42.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_42.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_43.S b/runtime/interpreter/mterp/x86_64/op_unused_43.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_43.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_79.S b/runtime/interpreter/mterp/x86_64/op_unused_79.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_79.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_7a.S b/runtime/interpreter/mterp/x86_64/op_unused_7a.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_7a.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f3.S b/runtime/interpreter/mterp/x86_64/op_unused_f3.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f3.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f4.S b/runtime/interpreter/mterp/x86_64/op_unused_f4.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f4.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f5.S b/runtime/interpreter/mterp/x86_64/op_unused_f5.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f5.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f6.S b/runtime/interpreter/mterp/x86_64/op_unused_f6.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f6.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f7.S b/runtime/interpreter/mterp/x86_64/op_unused_f7.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f7.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f8.S b/runtime/interpreter/mterp/x86_64/op_unused_f8.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_f9.S b/runtime/interpreter/mterp/x86_64/op_unused_f9.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_f9.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fc.S b/runtime/interpreter/mterp/x86_64/op_unused_fc.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fc.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_unused_fd.S b/runtime/interpreter/mterp/x86_64/op_unused_fd.S
deleted file mode 100644
index 280615f..0000000
--- a/runtime/interpreter/mterp/x86_64/op_unused_fd.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/unused.S"
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int.S b/runtime/interpreter/mterp/x86_64/op_ushr_int.S
deleted file mode 100644
index dd91086..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
deleted file mode 100644
index d38aedd..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
deleted file mode 100644
index f7ff8ab..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"shrl %cl, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long.S b/runtime/interpreter/mterp/x86_64/op_ushr_long.S
deleted file mode 100644
index 7c6daca..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop1.S" {"instr":"shrq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
deleted file mode 100644
index cd6a22c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_ushr_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/shop2addr.S" {"instr":"shrq %cl, %rax","wide":"1"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int.S b/runtime/interpreter/mterp/x86_64/op_xor_int.S
deleted file mode 100644
index b295d74..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop.S" {"instr":"xorl (rFP,%rcx,4), %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
deleted file mode 100644
index 879bfc0..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binop2addr.S" {"instr":"xorl %eax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
deleted file mode 100644
index 5d375a1..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_lit16.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit16.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S b/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
deleted file mode 100644
index 54cce9c..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_int_lit8.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopLit8.S" {"instr":"xorl %ecx, %eax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long.S b/runtime/interpreter/mterp/x86_64/op_xor_long.S
deleted file mode 100644
index 52b44e2..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_long.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide.S" {"instr":"xorq (rFP,%rcx,4), %rax"}
diff --git a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S b/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
deleted file mode 100644
index d75c4ba..0000000
--- a/runtime/interpreter/mterp/x86_64/op_xor_long_2addr.S
+++ /dev/null
@@ -1 +0,0 @@
-%include "x86_64/binopWide2addr.S" {"instr":"xorq %rax, (rFP,%rcx,4)"}
diff --git a/runtime/interpreter/mterp/x86_64/other.S b/runtime/interpreter/mterp/x86_64/other.S
new file mode 100644
index 0000000..849155c
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64/other.S
@@ -0,0 +1,287 @@
+%def const(helper="UndefinedConstHandler"):
+ /* const/class vAA, type@BBBB */
+ /* const/method-handle vAA, method_handle@BBBB */
+ /* const/method-type vAA, proto@BBBB */
+ /* const/string vAA, string@BBBB */
+ .extern $helper
+ EXPORT_PC
+ movzwq 2(rPC), OUT_ARG0 # OUT_ARG0 <- BBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL($helper) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def unused():
+/*
+ * Bail to reference interpreter to throw.
+ */
+ jmp MterpFallback
+
+%def op_const():
+ /* const vAA, #+BBBBbbbb */
+ movl 2(rPC), %eax # grab all 32 bits at once
+ SET_VREG %eax, rINSTq # vAA<- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_16():
+ /* const/16 vAA, #+BBBB */
+ movswl 2(rPC), %ecx # ecx <- ssssBBBB
+ SET_VREG %ecx, rINSTq # vAA <- ssssBBBB
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_4():
+ /* const/4 vA, #+B */
+ movsbl rINSTbl, %eax # eax <- ssssssBx
+ movl $$0xf, rINST
+ andl %eax, rINST # rINST <- A
+ sarl $$4, %eax
+ SET_VREG %eax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_const_class():
+% const(helper="MterpConstClass")
+
+%def op_const_high16():
+ /* const/high16 vAA, #+BBBB0000 */
+ movzwl 2(rPC), %eax # eax <- 0000BBBB
+ sall $$16, %eax # eax <- BBBB0000
+ SET_VREG %eax, rINSTq # vAA <- eax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_method_handle():
+% const(helper="MterpConstMethodHandle")
+
+%def op_const_method_type():
+% const(helper="MterpConstMethodType")
+
+%def op_const_string():
+% const(helper="MterpConstString")
+
+%def op_const_string_jumbo():
+ /* const/string vAA, String@BBBBBBBB */
+ EXPORT_PC
+ movl 2(rPC), OUT_32_ARG0 # OUT_32_ARG0 <- BBBBBBBB
+ movq rINSTq, OUT_ARG1
+ leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG2
+ movq rSELF, OUT_ARG3
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
+ testb %al, %al
+ jnz MterpPossibleException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide():
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ movq 2(rPC), %rax # rax <- HHHHhhhhBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+%def op_const_wide_16():
+ /* const-wide/16 vAA, #+BBBB */
+ movswq 2(rPC), %rax # rax <- ssssBBBB
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_wide_32():
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ movslq 2(rPC), %rax # eax <- ssssssssBBBBbbbb
+ SET_WIDE_VREG %rax, rINSTq # store
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide_high16():
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ movzwq 2(rPC), %rax # rax <- 0000BBBB
+ salq $$48, %rax # rax <- BBBB000000000000
+ SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_monitor_enter():
+/*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artLockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_monitor_exit():
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ EXPORT_PC
+ GET_VREG OUT_32_ARG0, rINSTq
+ movq rSELF, OUT_ARG1
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
+ testq %rax, %rax
+ jnz MterpException
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move(is_object="0"):
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ movl rINST, %eax # eax <- BA
+ andb $$0xf, %al # eax <- A
+ shrl $$4, rINST # rINST <- B
+ GET_VREG %edx, rINSTq
+ .if $is_object
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_16(is_object="0"):
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ movzwq 4(rPC), %rcx # ecx <- BBBB
+ movzwq 2(rPC), %rax # eax <- AAAA
+ GET_VREG %edx, %rcx
+ .if $is_object
+ SET_VREG_OBJECT %edx, %rax # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, %rax # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_exception():
+ /* move-exception vAA */
+ movq rSELF, %rcx
+ movl THREAD_EXCEPTION_OFFSET(%rcx), %eax
+ SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- exception object
+ movl $$0, THREAD_EXCEPTION_OFFSET(%rcx)
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_from16(is_object="0"):
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ movzwq 2(rPC), %rax # eax <- BBBB
+ GET_VREG %edx, %rax # edx <- fp[BBBB]
+ .if $is_object
+ SET_VREG_OBJECT %edx, rINSTq # fp[A] <- fp[B]
+ .else
+ SET_VREG %edx, rINSTq # fp[A] <- fp[B]
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_move_object():
+% op_move(is_object="1")
+
+%def op_move_object_16():
+% op_move_16(is_object="1")
+
+%def op_move_object_from16():
+% op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movl (%rax), %eax # eax <- result.i
+ .if $is_object
+ SET_VREG_OBJECT %eax, rINSTq # fp[AA] <- result
+ .else
+ SET_VREG %eax, rINSTq # fp[AA] <- result
+ .endif
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_result_object():
+% op_move_result(is_object="1")
+
+%def op_move_result_wide():
+ /* move-result-wide vAA */
+ movq OFF_FP_RESULT_REGISTER(rFP), %rax # get pointer to result JType.
+ movq (%rax), %rdx # Get wide
+ SET_WIDE_VREG %rdx, rINSTq # v[AA] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide():
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movl rINST, %ecx # ecx <- BA
+ sarl $$4, rINST # rINST <- B
+ andb $$0xf, %cl # ecx <- A
+ GET_WIDE_VREG %rdx, rINSTq # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rcx # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide_16():
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwq 4(rPC), %rcx # ecx<- BBBB
+ movzwq 2(rPC), %rax # eax<- AAAA
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, %rax # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_wide_from16():
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ movzwl 2(rPC), %ecx # ecx <- BBBB
+ GET_WIDE_VREG %rdx, %rcx # rdx <- v[B]
+ SET_WIDE_VREG %rdx, rINSTq # v[A] <- rdx
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_nop():
+ ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_unused_3e():
+% unused()
+
+%def op_unused_3f():
+% unused()
+
+%def op_unused_40():
+% unused()
+
+%def op_unused_41():
+% unused()
+
+%def op_unused_42():
+% unused()
+
+%def op_unused_43():
+% unused()
+
+%def op_unused_79():
+% unused()
+
+%def op_unused_7a():
+% unused()
+
+%def op_unused_f3():
+% unused()
+
+%def op_unused_f4():
+% unused()
+
+%def op_unused_f5():
+% unused()
+
+%def op_unused_f6():
+% unused()
+
+%def op_unused_f7():
+% unused()
+
+%def op_unused_f8():
+% unused()
+
+%def op_unused_f9():
+% unused()
+
+%def op_unused_fc():
+% unused()
+
+%def op_unused_fd():
+% unused()
diff --git a/runtime/interpreter/mterp/x86_64/shop2addr.S b/runtime/interpreter/mterp/x86_64/shop2addr.S
deleted file mode 100644
index 6b06d00..0000000
--- a/runtime/interpreter/mterp/x86_64/shop2addr.S
+++ /dev/null
@@ -1,19 +0,0 @@
-%default {"wide":"0"}
-/*
- * Generic 32-bit "shift/2addr" operation.
- */
- /* shift/2addr vA, vB */
- movl rINST, %ecx # ecx <- BA
- sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx, %rcx # ecx <- vBB
- andb $$0xf, rINSTbl # rINST <- A
- .if $wide
- GET_WIDE_VREG %rax, rINSTq # rax <- vAA
- $instr # ex: sarl %cl, %eax
- SET_WIDE_VREG %rax, rINSTq
- .else
- GET_VREG %eax, rINSTq # eax <- vAA
- $instr # ex: sarl %cl, %eax
- SET_VREG %eax, rINSTq
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop.S b/runtime/interpreter/mterp/x86_64/sseBinop.S
deleted file mode 100644
index 09d3364..0000000
--- a/runtime/interpreter/mterp/x86_64/sseBinop.S
+++ /dev/null
@@ -1,9 +0,0 @@
-%default {"instr":"","suff":""}
- movzbq 2(rPC), %rcx # ecx <- BB
- movzbq 3(rPC), %rax # eax <- CC
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
- pxor %xmm0, %xmm0
- movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S b/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
deleted file mode 100644
index 084166b..0000000
--- a/runtime/interpreter/mterp/x86_64/sseBinop2Addr.S
+++ /dev/null
@@ -1,10 +0,0 @@
-%default {"instr":"","suff":""}
- movl rINST, %ecx # ecx <- A+
- andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
- sarl $$4, rINST # rINST<- B
- ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
- pxor %xmm0, %xmm0
- movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unop.S b/runtime/interpreter/mterp/x86_64/unop.S
deleted file mode 100644
index 1777123..0000000
--- a/runtime/interpreter/mterp/x86_64/unop.S
+++ /dev/null
@@ -1,22 +0,0 @@
-%default {"preinstr":"", "instr":"", "wide":"0"}
-/*
- * Generic 32/64-bit unary operation. Provide an "instr" line that
- * specifies an instruction that performs "result = op eax".
- */
- /* unop vA, vB */
- movl rINST, %ecx # rcx <- A+
- sarl $$4,rINST # rINST <- B
- .if ${wide}
- GET_WIDE_VREG %rax, rINSTq # rax <- vB
- .else
- GET_VREG %eax, rINSTq # eax <- vB
- .endif
- andb $$0xf,%cl # ecx <- A
-$preinstr
-$instr
- .if ${wide}
- SET_WIDE_VREG %rax, %rcx
- .else
- SET_VREG %eax, %rcx
- .endif
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/unused.S b/runtime/interpreter/mterp/x86_64/unused.S
deleted file mode 100644
index c95ef94..0000000
--- a/runtime/interpreter/mterp/x86_64/unused.S
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
- * Bail to reference interpreter to throw.
- */
- jmp MterpFallback
diff --git a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S
deleted file mode 100644
index fb8ae6a..0000000
--- a/runtime/interpreter/mterp/x86_64/zcmp.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Generic one-operand compare-and-branch operation. Provide a "revcmp"
- * fragment that specifies the *reverse* comparison to perform, e.g.
- * for "if-le" you would use "gt".
- *
- * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
- */
- /* if-cmp vAA, +BBBB */
- cmpl $$0, VREG_ADDRESS(rINSTq) # compare (vA, 0)
- j${revcmp} 1f
- movswq 2(rPC), rINSTq # fetch signed displacement
- testq rINSTq, rINSTq
- jmp MterpCommonTakenBranch
-1:
- cmpl $$JIT_CHECK_OSR, rPROFILE
- je .L_check_not_taken_osr
- ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/safe_math.h b/runtime/interpreter/safe_math.h
index 78b3539..06f046a 100644
--- a/runtime/interpreter/safe_math.h
+++ b/runtime/interpreter/safe_math.h
@@ -41,19 +41,19 @@
return static_cast<biggest_T>(Op<unsigned_biggest_T>()(val1, val2));
}
-// Perform signed a signed add on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed add on 'a' and 'b' with defined wrapping behavior.
template<typename T1, typename T2>
static inline typename select_bigger<T1, T2>::type SafeAdd(T1 a, T2 b) {
return SafeMath<std::plus>(a, b);
}
-// Perform signed a signed substract on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed subtract on 'a' and 'b' with defined wrapping behavior.
template<typename T1, typename T2>
static inline typename select_bigger<T1, T2>::type SafeSub(T1 a, T2 b) {
return SafeMath<std::minus>(a, b);
}
-// Perform signed a signed multiply on 'a' and 'b' with defined wrapping behavior.
+// Perform a signed multiply on 'a' and 'b' with defined wrapping behavior.
template<typename T1, typename T2>
static inline typename select_bigger<T1, T2>::type SafeMul(T1 a, T2 b) {
return SafeMath<std::multiplies>(a, b);
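
The safe_math.h comments above describe helpers that perform signed arithmetic with defined wrap-around. A minimal, self-contained sketch of that idea follows; it is not the ART helpers themselves, and it is simplified to a fixed 32-bit width rather than SafeMath's promotion to the wider operand type. The operation is done on unsigned values, where overflow wraps modulo 2^32, and the result is cast back to the signed type.

#include <cstdint>
#include <iostream>

// Illustrative names only; ART's SafeAdd/SafeSub/SafeMul additionally promote
// to the bigger of the two operand types before operating.
static int32_t WrappingAdd(int32_t a, int32_t b) {
  // Unsigned addition wraps modulo 2^32; the cast back to int32_t is modular
  // on two's-complement targets (and defined as such since C++20).
  return static_cast<int32_t>(static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
}

static int32_t WrappingMul(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) * static_cast<uint32_t>(b));
}

int main() {
  std::cout << WrappingAdd(INT32_MAX, 1) << "\n";  // prints -2147483648 instead of UB
  std::cout << WrappingMul(1 << 30, 4) << "\n";    // prints 0 (2^32 mod 2^32)
  return 0;
}
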
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index f76b86c..c0920a8 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -49,6 +49,17 @@
// - interpreter - separate VRegs and reference arrays. References are in the reference array.
// - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
+ private:
+ // Used to keep track of extra state the shadow frame has.
+ enum class FrameFlags : uint32_t {
+ // We have been requested to notify when this frame gets popped.
+ kNotifyFramePop = 1 << 0,
+ // We have been asked to pop this frame off the stack as soon as possible.
+ kForcePopFrame = 1 << 1,
+ // We have been asked to re-execute the last instruction.
+ kForceRetryInst = 1 << 2,
+ };
+
public:
// Compute size of ShadowFrame in bytes assuming it has a reference array.
static size_t ComputeSize(uint32_t num_vregs) {
@@ -159,14 +170,14 @@
}
int64_t GetVRegLong(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
return *reinterpret_cast<unaligned_int64*>(vreg);
}
double GetVRegDouble(size_t i) const {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
typedef const double unaligned_double __attribute__ ((aligned (4)));
return *reinterpret_cast<unaligned_double*>(vreg);
@@ -179,12 +190,8 @@
mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
mirror::Object* ref;
- if (HasReferenceArray()) {
- ref = References()[i].AsMirrorPtr();
- } else {
- const uint32_t* vreg_ptr = &vregs_[i];
- ref = reinterpret_cast<const StackReference<mirror::Object>*>(vreg_ptr)->AsMirrorPtr();
- }
+ DCHECK(HasReferenceArray());
+ ref = References()[i].AsMirrorPtr();
ReadBarrier::MaybeAssertToSpaceInvariant(ref);
if (kVerifyFlags & kVerifyReads) {
VerifyObject(ref);
@@ -220,7 +227,7 @@
}
void SetVRegLong(size_t i, int64_t val) {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_int64*>(vreg) = val;
@@ -233,7 +240,7 @@
}
void SetVRegDouble(size_t i, double val) {
- DCHECK_LT(i, NumberOfVRegs());
+ DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
typedef double unaligned_double __attribute__ ((aligned (4)));
*reinterpret_cast<unaligned_double*>(vreg) = val;
@@ -279,47 +286,47 @@
return lock_count_data_;
}
- static size_t LockCountDataOffset() {
+ static constexpr size_t LockCountDataOffset() {
return OFFSETOF_MEMBER(ShadowFrame, lock_count_data_);
}
- static size_t LinkOffset() {
+ static constexpr size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
- static size_t MethodOffset() {
+ static constexpr size_t MethodOffset() {
return OFFSETOF_MEMBER(ShadowFrame, method_);
}
- static size_t DexPCOffset() {
+ static constexpr size_t DexPCOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_pc_);
}
- static size_t NumberOfVRegsOffset() {
+ static constexpr size_t NumberOfVRegsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, number_of_vregs_);
}
- static size_t VRegsOffset() {
+ static constexpr size_t VRegsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, vregs_);
}
- static size_t ResultRegisterOffset() {
+ static constexpr size_t ResultRegisterOffset() {
return OFFSETOF_MEMBER(ShadowFrame, result_register_);
}
- static size_t DexPCPtrOffset() {
+ static constexpr size_t DexPCPtrOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_pc_ptr_);
}
- static size_t DexInstructionsOffset() {
+ static constexpr size_t DexInstructionsOffset() {
return OFFSETOF_MEMBER(ShadowFrame, dex_instructions_);
}
- static size_t CachedHotnessCountdownOffset() {
+ static constexpr size_t CachedHotnessCountdownOffset() {
return OFFSETOF_MEMBER(ShadowFrame, cached_hotness_countdown_);
}
- static size_t HotnessCountdownOffset() {
+ static constexpr size_t HotnessCountdownOffset() {
return OFFSETOF_MEMBER(ShadowFrame, hotness_countdown_);
}
@@ -345,11 +352,27 @@
}
bool NeedsNotifyPop() const {
- return needs_notify_pop_;
+ return GetFrameFlag(FrameFlags::kNotifyFramePop);
}
void SetNotifyPop(bool notify) {
- needs_notify_pop_ = notify;
+ UpdateFrameFlag(notify, FrameFlags::kNotifyFramePop);
+ }
+
+ bool GetForcePopFrame() const {
+ return GetFrameFlag(FrameFlags::kForcePopFrame);
+ }
+
+ void SetForcePopFrame(bool enable) {
+ UpdateFrameFlag(enable, FrameFlags::kForcePopFrame);
+ }
+
+ bool GetForceRetryInstruction() const {
+ return GetFrameFlag(FrameFlags::kForceRetryInst);
+ }
+
+ void SetForceRetryInstruction(bool enable) {
+ UpdateFrameFlag(enable, FrameFlags::kForceRetryInst);
}
private:
@@ -364,7 +387,7 @@
dex_pc_(dex_pc),
cached_hotness_countdown_(0),
hotness_countdown_(0),
- needs_notify_pop_(0) {
+ frame_flags_(0) {
// TODO(iam): Remove this parameter, it's an an artifact of portable removal
DCHECK(has_reference_array);
if (has_reference_array) {
@@ -374,6 +397,18 @@
}
}
+ void UpdateFrameFlag(bool enable, FrameFlags flag) {
+ if (enable) {
+ frame_flags_ |= static_cast<uint32_t>(flag);
+ } else {
+ frame_flags_ &= ~static_cast<uint32_t>(flag);
+ }
+ }
+
+ bool GetFrameFlag(FrameFlags flag) const {
+ return (frame_flags_ & static_cast<uint32_t>(flag)) != 0;
+ }
+
const StackReference<mirror::Object>* References() const {
DCHECK(HasReferenceArray());
const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
@@ -397,9 +432,11 @@
uint32_t dex_pc_;
int16_t cached_hotness_countdown_;
int16_t hotness_countdown_;
- // TODO Might be worth it to try to bit-pack this into some other field to reduce stack usage.
- // NB alignment requires that this field takes 4 bytes. Only 1 bit is actually ever used.
- bool needs_notify_pop_;
+
+ // This is a set of ShadowFrame::FrameFlags which denote special states this frame is in.
+ // NB alignment requires that this field takes 4 bytes no matter its size. Only 3 bits are
+ // currently used.
+ uint32_t frame_flags_;
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
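
For illustration only, the frame_flags_ bit set above boils down to the usual set/clear/test pattern on a single 32-bit field. The sketch below is self-contained; the bit assignments are assumptions, not necessarily the values ShadowFrame::FrameFlags uses elsewhere in this change.

    #include <cstdint>

    // Illustrative bit assignments; the real enum is defined elsewhere in ART.
    enum class FrameFlags : uint32_t {
      kNotifyFramePop = 1u << 0,
      kForcePopFrame  = 1u << 1,
      kForceRetryInst = 1u << 2,
    };

    struct Frame {
      uint32_t frame_flags_ = 0;  // one 4-byte field instead of several padded bools

      void UpdateFrameFlag(bool enable, FrameFlags flag) {
        if (enable) {
          frame_flags_ |= static_cast<uint32_t>(flag);   // set the bit
        } else {
          frame_flags_ &= ~static_cast<uint32_t>(flag);  // clear the bit
        }
      }

      bool GetFrameFlag(FrameFlags flag) const {
        return (frame_flags_ & static_cast<uint32_t>(flag)) != 0;  // test the bit
      }
    };
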
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index d4b51af..38ecc5a 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -638,7 +638,7 @@
}
uint32_t args[1];
- args[0] = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_array.Get()));
+ args[0] = reinterpret_cast32<uint32_t>(h_array.Get());
EnterInterpreterFromInvoke(self, constructor, h_obj.Get(), args, nullptr);
if (self->IsExceptionPending()) {
@@ -1180,19 +1180,19 @@
}
case Primitive::kPrimShort: {
- typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+ using unaligned_short __attribute__((__aligned__(1))) = int16_t;
result->SetS(*reinterpret_cast<unaligned_short*>(static_cast<intptr_t>(address)));
return;
}
case Primitive::kPrimInt: {
- typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+ using unaligned_int __attribute__((__aligned__(1))) = int32_t;
result->SetI(*reinterpret_cast<unaligned_int*>(static_cast<intptr_t>(address)));
return;
}
case Primitive::kPrimLong: {
- typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+ using unaligned_long __attribute__((__aligned__(1))) = int64_t;
result->SetJ(*reinterpret_cast<unaligned_long*>(static_cast<intptr_t>(address)));
return;
}
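
The typedef-to-using conversions above keep the same underlying idiom: an integer alias whose required alignment is 1 byte, so the compiler emits a load that is acceptable at any address. A minimal standalone sketch of that idiom (names are illustrative):

    #include <cstdint>

    // GCC/Clang extension, as used above: int32_t with 1-byte alignment so a
    // read through it at an arbitrarily aligned address is well-formed for the
    // compiler.
    using unaligned_int32 __attribute__((__aligned__(1))) = int32_t;

    int32_t ReadUnalignedInt32(const uint8_t* p) {
      return *reinterpret_cast<const unaligned_int32*>(p);
    }
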
@@ -1691,14 +1691,21 @@
}
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
- Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args, JValue* result) {
+ Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
int32_t length = args[1];
DCHECK_GE(length, 0);
- ObjPtr<mirror::Class> element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ ObjPtr<mirror::Object> element_class = reinterpret_cast32<mirror::Object*>(args[0]);
+ if (element_class == nullptr) {
+ AbortTransactionOrFail(self, "VMRuntime.newUnpaddedArray with null element_class.");
+ return;
+ }
Runtime* runtime = Runtime::Current();
ObjPtr<mirror::Class> array_class =
- runtime->GetClassLinker()->FindArrayClass(self, element_class);
+ runtime->GetClassLinker()->FindArrayClass(self, element_class->AsClass());
DCHECK(array_class != nullptr);
gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
result->SetL(mirror::Array::Alloc<true, true>(self,
@@ -1789,14 +1796,17 @@
receiver->NotifyAll(self);
}
-void UnstartedRuntime::UnstartedJNIStringCompareTo(
- Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver, uint32_t* args,
- JValue* result) {
- mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString();
+void UnstartedRuntime::UnstartedJNIStringCompareTo(Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result) {
+ ObjPtr<mirror::Object> rhs = reinterpret_cast32<mirror::Object*>(args[0]);
if (rhs == nullptr) {
- AbortTransactionOrFail(self, "String.compareTo with null object");
+ AbortTransactionOrFail(self, "String.compareTo with null object.");
+ return;
}
- result->SetI(receiver->AsString()->CompareTo(rhs));
+ result->SetI(receiver->AsString()->CompareTo(rhs->AsString()));
}
void UnstartedRuntime::UnstartedJNIStringIntern(
@@ -1854,9 +1864,16 @@
}
void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Unsafe.compareAndSwapInt with null object.");
+ return;
+ }
jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
jint expectedValue = args[3];
jint newValue = args[4];
@@ -1877,12 +1894,14 @@
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
-void UnstartedRuntime::UnstartedJNIUnsafeGetIntVolatile(
- Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
- uint32_t* args, JValue* result) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+void UnstartedRuntime::UnstartedJNIUnsafeGetIntVolatile(Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
if (obj == nullptr) {
- AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ AbortTransactionOrFail(self, "Unsafe.getIntVolatile with null object.");
return;
}
@@ -1890,12 +1909,18 @@
result->SetI(obj->GetField32Volatile(MemberOffset(offset)));
}
-void UnstartedRuntime::UnstartedJNIUnsafePutObject(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result ATTRIBUTE_UNUSED) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+void UnstartedRuntime::UnstartedJNIUnsafePutObject(Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result ATTRIBUTE_UNUSED) {
+ ObjPtr<mirror::Object> obj = reinterpret_cast32<mirror::Object*>(args[0]);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Unsafe.putObject with null object.");
+ return;
+ }
jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
- mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]);
+ ObjPtr<mirror::Object> newValue = reinterpret_cast32<mirror::Object*>(args[3]);
if (Runtime::Current()->IsActiveTransaction()) {
obj->SetFieldObject<true>(MemberOffset(offset), newValue);
} else {
@@ -1904,26 +1929,45 @@
}
void UnstartedRuntime::UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
- ObjPtr<mirror::Class> component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
+ Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
+ if (component == nullptr) {
+ AbortTransactionOrFail(self, "Unsafe.getArrayBaseOffsetForComponentType with null component.");
+ return;
+ }
+ Primitive::Type primitive_type = component->AsClass()->GetPrimitiveType();
result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
}
void UnstartedRuntime::UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
- ObjPtr<mirror::Class> component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
+ Thread* self,
+ ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ ObjPtr<mirror::Object> component = reinterpret_cast32<mirror::Object*>(args[0]);
+ if (component == nullptr) {
+ AbortTransactionOrFail(self, "Unsafe.getArrayIndexScaleForComponentType with null component.");
+ return;
+ }
+ Primitive::Type primitive_type = component->AsClass()->GetPrimitiveType();
result->SetI(Primitive::ComponentSize(primitive_type));
}
-typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result,
- size_t arg_size);
+using InvokeHandler = void(*)(Thread* self,
+ ShadowFrame* shadow_frame,
+ JValue* result,
+ size_t arg_size);
-typedef void (*JNIHandler)(Thread* self, ArtMethod* method, mirror::Object* receiver,
- uint32_t* args, JValue* result);
+using JNIHandler = void(*)(Thread* self,
+ ArtMethod* method,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result);
static bool tables_initialized_ = false;
static std::unordered_map<std::string, InvokeHandler> invoke_handlers_;
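
For context, the handler aliases above feed name-keyed dispatch tables such as invoke_handlers_. A stripped-down sketch of that table shape, with purely illustrative names and signatures (not ART's actual types):

    #include <cstdint>
    #include <string>
    #include <unordered_map>

    struct JValueLike { int64_t j = 0; };  // illustrative stand-in for JValue
    using Handler = void (*)(JValueLike* result);

    static std::unordered_map<std::string, Handler> handlers;

    static void HandleNanoTime(JValueLike* result) { result->j = 0; }

    void InitHandlers() {
      handlers["java.lang.System.nanoTime"] = HandleNanoTime;
    }

    bool Dispatch(const std::string& name, JValueLike* result) {
      auto it = handlers.find(name);
      if (it == handlers.end()) {
        return false;  // no unstarted-runtime handler registered for this method
      }
      it->second(result);
      return true;
    }
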
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 200fc5b..bd2705d 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -261,7 +261,7 @@
UnstartedMemoryPeekShort(self, tmp.get(), &result, 0);
- typedef int16_t unaligned_short __attribute__ ((aligned (1)));
+ using unaligned_short __attribute__((__aligned__(1))) = int16_t;
const unaligned_short* short_ptr = reinterpret_cast<const unaligned_short*>(base_ptr + i);
EXPECT_EQ(result.GetS(), *short_ptr);
}
@@ -284,7 +284,7 @@
UnstartedMemoryPeekInt(self, tmp.get(), &result, 0);
- typedef int32_t unaligned_int __attribute__ ((aligned (1)));
+ using unaligned_int __attribute__((__aligned__(1))) = int32_t;
const unaligned_int* int_ptr = reinterpret_cast<const unaligned_int*>(base_ptr + i);
EXPECT_EQ(result.GetI(), *int_ptr);
}
@@ -307,7 +307,7 @@
UnstartedMemoryPeekLong(self, tmp.get(), &result, 0);
- typedef int64_t unaligned_long __attribute__ ((aligned (1)));
+ using unaligned_long __attribute__((__aligned__(1))) = int64_t;
const unaligned_long* long_ptr = reinterpret_cast<const unaligned_long*>(base_ptr + i);
EXPECT_EQ(result.GetJ(), *long_ptr);
}
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index 481aff9..9245f1e 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -87,13 +87,13 @@
}
}
- virtual bool Accept() REQUIRES(!state_lock_);
+ bool Accept() override REQUIRES(!state_lock_);
- virtual bool Establish(const JdwpOptions*) {
+ bool Establish(const JdwpOptions*) override {
return false;
}
- virtual void Shutdown() REQUIRES(!state_lock_) {
+ void Shutdown() override REQUIRES(!state_lock_) {
int control_sock;
int local_clientSock;
{
@@ -116,7 +116,7 @@
WakePipe();
}
- virtual bool ProcessIncoming() REQUIRES(!state_lock_);
+ bool ProcessIncoming() override REQUIRES(!state_lock_);
private:
int ReceiveClientFd() REQUIRES(!state_lock_);
@@ -346,7 +346,7 @@
if (!HaveFullPacket()) {
/* read some more, looping until we have data */
errno = 0;
- while (1) {
+ while (true) {
int selCount;
fd_set readfds;
int maxfd = -1;
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 1e61ba0..d31f166 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1344,13 +1344,14 @@
VLOG(jdwp) << StringPrintf(" --> event requestId=%#x", requestId);
/* add it to the list */
+ // TODO: RegisterEvent() should take std::unique_ptr<>.
JdwpError err = state->RegisterEvent(pEvent.get());
if (err != ERR_NONE) {
/* registration failed, probably because event is bogus */
LOG(WARNING) << "WARNING: event request rejected";
return err;
}
- pEvent.release();
+ pEvent.release(); // NOLINT b/117926937
return ERR_NONE;
}
@@ -1432,7 +1433,7 @@
/*
* Handler map decl.
*/
-typedef JdwpError (*JdwpRequestHandler)(JdwpState* state, Request* request, ExpandBuf* reply);
+using JdwpRequestHandler = JdwpError(*)(JdwpState* state, Request* request, ExpandBuf* reply);
struct JdwpHandlerMap {
uint8_t cmdSet;
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
index 673a942..b8b0e16 100644
--- a/runtime/jdwp/jdwp_socket.cc
+++ b/runtime/jdwp/jdwp_socket.cc
@@ -54,10 +54,10 @@
remote_port_(0U) {
}
- virtual bool Accept();
- virtual bool Establish(const JdwpOptions*);
- virtual void Shutdown();
- virtual bool ProcessIncoming();
+ bool Accept() override;
+ bool Establish(const JdwpOptions*) override;
+ void Shutdown() override;
+ bool ProcessIncoming() override;
private:
in_addr remote_addr_;
@@ -383,7 +383,7 @@
if (!HaveFullPacket()) {
/* read some more, looping until we have data */
errno = 0;
- while (1) {
+ while (true) {
int selCount;
fd_set readfds;
int maxfd = -1;
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 63fb22c..6cd719a 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -77,11 +77,11 @@
namespace art {
extern "C" {
- typedef enum {
+ enum JITAction {
JIT_NOACTION = 0,
JIT_REGISTER_FN,
JIT_UNREGISTER_FN
- } JITAction;
+ };
struct JITCodeEntry {
// Atomic to ensure the reader can always iterate over the linked list
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index a6bc029..c1f69b8 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -416,7 +416,7 @@
}
extern "C" void art_quick_osr_stub(void** stack,
- uint32_t stack_size_in_bytes,
+ size_t stack_size_in_bytes,
const uint8_t* native_pc,
JValue* result,
const char* shorty,
@@ -718,6 +718,22 @@
method->SetCounter(new_count);
}
+class ScopedSetRuntimeThread {
+ public:
+ explicit ScopedSetRuntimeThread(Thread* self)
+ : self_(self), was_runtime_thread_(self_->IsRuntimeThread()) {
+ self_->SetIsRuntimeThread(true);
+ }
+
+ ~ScopedSetRuntimeThread() {
+ self_->SetIsRuntimeThread(was_runtime_thread_);
+ }
+
+ private:
+ Thread* self_;
+ bool was_runtime_thread_;
+};
+
void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
@@ -728,6 +744,8 @@
ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true);
}
JitCompileTask compile_task(method, JitCompileTask::kCompile);
+ // Fake being a runtime thread so that class-load behavior matches normal JIT compilation.
+ ScopedSetRuntimeThread ssrt(thread);
compile_task.Run(thread);
}
return;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 2b2898c..63cb6a4 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,16 +18,21 @@
#include <sstream>
+#include "android-base/unique_fd.h"
+
#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/logging.h" // For VLOG.
+#include "base/membarrier.h"
+#include "base/memfd.h"
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "base/utils.h"
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
@@ -50,15 +55,32 @@
#include "thread-current-inl.h"
#include "thread_list.h"
+using android::base::unique_fd;
+
namespace art {
namespace jit {
-static constexpr int kProtData = PROT_READ | PROT_WRITE;
-static constexpr int kProtCode = PROT_READ | PROT_EXEC;
-
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+static constexpr int kProtR = PROT_READ;
+static constexpr int kProtRW = PROT_READ | PROT_WRITE;
+static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtRX = PROT_READ | PROT_EXEC;
+
+namespace {
+
+// Translate an address belonging to one memory map into an address in a second. This is useful
+// when there are two virtual memory ranges for the same physical memory range.
+template <typename T>
+T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
+ CHECK(src.HasAddress(src_ptr));
+ uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
+ return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
+}
+
+} // namespace
+
class JitCodeCache::JniStubKey {
public:
explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
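
TranslateAddress() above is plain offset arithmetic between two views of the same physical pages. Spelled out with raw pointers (all names here are illustrative, not the MemMap API):

    #include <cstdint>

    // An address in one view maps to the address at the same offset in the
    // other view, because both views are backed by the same memory file.
    uint8_t* Translate(uint8_t* src_ptr, uint8_t* src_begin, uint8_t* dst_begin) {
      return dst_begin + (src_ptr - src_begin);
    }
    // e.g. a method header written at non_exec_begin + 0x40 is later executed
    // through exec_begin + 0x40.
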
@@ -185,14 +207,43 @@
return nullptr;
}
- // Decide how we should map the code and data sections.
- // If we use the code cache just for profiling we do not need to map the code section as
- // executable.
- // NOTE 1: this is yet another workaround to bypass strict SElinux policies in order to be able
- // to profile system server.
- // NOTE 2: We could just not create the code section at all but we will need to
- // special case too many cases.
- int memmap_flags_prot_code = used_only_for_profile_data ? (kProtCode & ~PROT_EXEC) : kProtCode;
+ // Register for membarrier expedited sync core if JIT will be generating code.
+ if (!used_only_for_profile_data) {
+ if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
+ // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
+ // flushed and it's used when adding code to the JIT. The memory used by the new code may
+ // have just been released and, in theory, the old code could still be in a pipeline.
+ VLOG(jit) << "Kernel does not support membarrier sync-core";
+ }
+ }
+
+ // File descriptor enabling dual-view mapping of code section.
+ unique_fd mem_fd;
+
+ // Bionic supports memfd_create, but the call may fail on older kernels.
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags */ 0));
+ if (mem_fd.get() < 0) {
+ VLOG(jit) << "Failed to initialize dual view JIT. memfd_create() error: "
+ << strerror(errno);
+ }
+
+ if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
+ std::ostringstream oss;
+ oss << "Failed to initialize memory file: " << strerror(errno);
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
+ // Data cache will be half of the initial allocation.
+ // Code cache will be the other half of the initial allocation.
+ // TODO: Make this variable?
+
+ // Align both capacities to page size, as that's the unit mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+ const size_t data_capacity = max_capacity / 2;
+ const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
+ DCHECK_LE(data_capacity + exec_capacity, max_capacity);
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
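
The art::membarrier() call above wraps the Linux membarrier(2) system call. A hedged sketch of the equivalent raw calls, assuming a Linux 4.16+ kernel and the constants from <linux/membarrier.h>; on older kernels the syscall fails, which the JIT tolerates:

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long Membarrier(int command) {
      return syscall(__NR_membarrier, command, 0);
    }

    // Called once when the code cache is created.
    bool RegisterSyncCore() {
      return Membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE) == 0;
    }

    // Called after new code has been written and flushed.
    bool IssueSyncCore() {
      return Membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE) == 0;
    }
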
@@ -200,71 +251,147 @@
// We could do PC-relative addressing to avoid this problem, but that
// would require reserving code and data area before submitting, which
// means more windows for the code memory to be RWX.
- MemMap data_map = MemMap::MapAnonymous(
- "data-code-cache",
- /* addr */ nullptr,
- max_capacity,
- kProtData,
- /* low_4gb */ true,
- /* reuse */ false,
- /* reservation */ nullptr,
- &error_str);
- if (!data_map.IsValid()) {
+ int base_flags;
+ MemMap data_pages;
+ if (mem_fd.get() >= 0) {
+ // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
+ // for data and non-writable view of JIT code pages. We use the memory file descriptor to
+ // enable dual mapping - we'll create a second mapping using the descriptor below. The
+ // mappings will look like:
+ //
+ // VA PA
+ //
+ // +---------------+
+ // | non exec code |\
+ // +---------------+ \
+ // : :\ \
+ // +---------------+.\.+---------------+
+ // | exec code | \| code |
+ // +---------------+...+---------------+
+ // | data | | data |
+ // +---------------+...+---------------+
+ //
+ // In this configuration code updates are written to the non-executable view of the code
+ // cache, and the executable view of the code cache has fixed RX memory protections.
+ //
+ // This memory needs to be mapped shared as the code portions will have two mappings.
+ base_flags = MAP_SHARED;
+ data_pages = MemMap::MapFile(
+ data_capacity + exec_capacity,
+ kProtRW,
+ base_flags,
+ mem_fd,
+ /* start */ 0,
+ /* low_4gb */ true,
+ "data-code-cache",
+ &error_str);
+ } else {
+ // Single view of JIT code cache case. Create an initial mapping of data pages large enough
+ // for data and JIT code pages. The mappings will look like:
+ //
+ // VA PA
+ //
+ // +---------------+...+---------------+
+ // | exec code | | code |
+ // +---------------+...+---------------+
+ // | data | | data |
+ // +---------------+...+---------------+
+ //
+ // In this configuration code updates are written to the executable view of the code cache,
+ // and the executable view of the code cache transitions RX to RWX for the update and then
+ // back to RX after the update.
+ base_flags = MAP_PRIVATE | MAP_ANON;
+ data_pages = MemMap::MapAnonymous(
+ "data-code-cache",
+ /* addr */ nullptr,
+ data_capacity + exec_capacity,
+ kProtRW,
+ /* low_4gb */ true,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ &error_str);
+ }
+
+ if (!data_pages.IsValid()) {
std::ostringstream oss;
oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
*error_msg = oss.str();
return nullptr;
}
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+ MemMap exec_pages;
+ MemMap non_exec_pages;
+ if (exec_capacity > 0) {
+ uint8_t* const divider = data_pages.Begin() + data_capacity;
+ // Set initial permission for executable view to catch any SELinux permission problems early
+ // (for processes that cannot map WX pages). Otherwise, this region does not need to be
+ // executable as there is no code in the cache yet.
+ exec_pages = data_pages.RemapAtEnd(divider,
+ "jit-code-cache",
+ kProtRX,
+ base_flags | MAP_FIXED,
+ mem_fd.get(),
+ (mem_fd.get() >= 0) ? data_capacity : 0,
+ &error_str);
+ if (!exec_pages.IsValid()) {
+ std::ostringstream oss;
+ oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
- // Data cache is 1 / 2 of the map.
- // TODO: Make this variable?
- size_t data_size = max_capacity / 2;
- size_t code_size = max_capacity - data_size;
- DCHECK_EQ(code_size + data_size, max_capacity);
- uint8_t* divider = data_map.Begin() + data_size;
-
- MemMap code_map = data_map.RemapAtEnd(
- divider, "jit-code-cache", memmap_flags_prot_code | PROT_WRITE, &error_str);
- if (!code_map.IsValid()) {
- std::ostringstream oss;
- oss << "Failed to create read write execute cache: " << error_str << " size=" << max_capacity;
- *error_msg = oss.str();
- return nullptr;
+ if (mem_fd.get() >= 0) {
+ // For dual view, create the secondary view of code memory used for updating code. This view
+ // is never executable.
+ non_exec_pages = MemMap::MapFile(exec_capacity,
+ kProtR,
+ base_flags,
+ mem_fd,
+ /* start */ data_capacity,
+ /* low_4gb */ false,
+ "jit-code-cache-rw",
+ &error_str);
+ if (!non_exec_pages.IsValid()) {
+ // Log and continue as single view JIT.
+ VLOG(jit) << "Failed to map non-executable view of JIT code cache";
+ }
+ }
+ } else {
+ // Profiling only. No memory for code required.
+ DCHECK(used_only_for_profile_data);
}
- DCHECK_EQ(code_map.Begin(), divider);
- data_size = initial_capacity / 2;
- code_size = initial_capacity - data_size;
- DCHECK_EQ(code_size + data_size, initial_capacity);
+
+ const size_t initial_data_capacity = initial_capacity / 2;
+ const size_t initial_exec_capacity =
+ (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);
+
return new JitCodeCache(
- std::move(code_map),
- std::move(data_map),
- code_size,
- data_size,
+ std::move(data_pages),
+ std::move(exec_pages),
+ std::move(non_exec_pages),
+ initial_data_capacity,
+ initial_exec_capacity,
max_capacity,
- garbage_collect_code,
- memmap_flags_prot_code);
+ garbage_collect_code);
}
-JitCodeCache::JitCodeCache(MemMap&& code_map,
- MemMap&& data_map,
- size_t initial_code_capacity,
+JitCodeCache::JitCodeCache(MemMap&& data_pages,
+ MemMap&& exec_pages,
+ MemMap&& non_exec_pages,
size_t initial_data_capacity,
+ size_t initial_exec_capacity,
size_t max_capacity,
- bool garbage_collect_code,
- int memmap_flags_prot_code)
+ bool garbage_collect_code)
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- code_map_(std::move(code_map)),
- data_map_(std::move(data_map)),
+ data_pages_(std::move(data_pages)),
+ exec_pages_(std::move(exec_pages)),
+ non_exec_pages_(std::move(non_exec_pages)),
max_capacity_(max_capacity),
- current_capacity_(initial_code_capacity + initial_data_capacity),
- code_end_(initial_code_capacity),
+ current_capacity_(initial_exec_capacity + initial_data_capacity),
data_end_(initial_data_capacity),
+ exec_end_(initial_exec_capacity),
last_collection_increased_code_cache_(false),
garbage_collect_code_(garbage_collect_code),
used_memory_for_data_(0),
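
The dual-view layout described in the hunk above reduces to mapping one memfd twice: a shared writable view that the JIT writes through, and a shared read-execute view that code runs from. A simplified sketch with plain POSIX calls and no error handling; it is an approximation only, since the real code carves data and code regions out of a single file via MemMap::MapFile/RemapAtEnd:

    #include <cstdint>
    #include <sys/mman.h>
    #include <unistd.h>

    struct DualView {
      uint8_t* writable;    // RW view, used to install code
      uint8_t* executable;  // RX view, used to run it
    };

    DualView MapDualView(int mem_fd, size_t capacity) {
      ftruncate(mem_fd, capacity);  // size the backing memfd
      void* rw = mmap(nullptr, capacity, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, 0);
      void* rx = mmap(nullptr, capacity, PROT_READ | PROT_EXEC, MAP_SHARED, mem_fd, 0);
      return DualView{static_cast<uint8_t*>(rw), static_cast<uint8_t*>(rx)};
    }
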
@@ -276,40 +403,46 @@
histogram_code_memory_use_("Memory used for compiled code", 16),
histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_),
- memmap_flags_prot_code_(memmap_flags_prot_code) {
+ inline_cache_cond_("Jit inline cache condition variable", lock_) {
- DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity);
- code_mspace_ = create_mspace_with_base(code_map_.Begin(), code_end_, false /*locked*/);
- data_mspace_ = create_mspace_with_base(data_map_.Begin(), data_end_, false /*locked*/);
+ DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);
- if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
- PLOG(FATAL) << "create_mspace_with_base failed";
+ // Initialize the data heap
+ data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
+ CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
+
+ // Initialize the code heap
+ MemMap* code_heap = nullptr;
+ if (non_exec_pages_.IsValid()) {
+ code_heap = &non_exec_pages_;
+ } else if (exec_pages_.IsValid()) {
+ code_heap = &exec_pages_;
}
-
- SetFootprintLimit(current_capacity_);
-
- CheckedCall(mprotect,
- "mprotect jit code cache",
- code_map_.Begin(),
- code_map_.Size(),
- memmap_flags_prot_code_);
- CheckedCall(mprotect,
- "mprotect jit data cache",
- data_map_.Begin(),
- data_map_.Size(),
- kProtData);
+ if (code_heap != nullptr) {
+ // Make all pages reserved for the code heap writable. The mspace allocator, which manages the
+ // heap, will take and initialize pages in create_mspace_with_base().
+ CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
+ exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+ CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
+ SetFootprintLimit(current_capacity_);
+ // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
+ // perform the update; write access is not required at any other time.
+ CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
+ } else {
+ exec_mspace_ = nullptr;
+ SetFootprintLimit(current_capacity_);
+ }
VLOG(jit) << "Created jit code cache: initial data size="
<< PrettySize(initial_data_capacity)
<< ", initial code size="
- << PrettySize(initial_code_capacity);
+ << PrettySize(initial_exec_capacity);
}
JitCodeCache::~JitCodeCache() {}
bool JitCodeCache::ContainsPc(const void* ptr) const {
- return code_map_.Begin() <= ptr && ptr < code_map_.End();
+ return exec_pages_.Begin() <= ptr && ptr < exec_pages_.End();
}
bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -377,22 +510,20 @@
: ScopedTrace("ScopedCodeCacheWrite"),
code_cache_(code_cache) {
ScopedTrace trace("mprotect all");
- CheckedCall(
- mprotect,
- "make code writable",
- code_cache_->code_map_.Begin(),
- code_cache_->code_map_.Size(),
- code_cache_->memmap_flags_prot_code_ | PROT_WRITE);
+ const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+ if (updatable_pages != nullptr) {
+ int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
+ CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+ }
}
~ScopedCodeCacheWrite() {
ScopedTrace trace("mprotect code");
- CheckedCall(
- mprotect,
- "make code protected",
- code_cache_->code_map_.Begin(),
- code_cache_->code_map_.Size(),
- code_cache_->memmap_flags_prot_code_);
+ const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
+ if (updatable_pages != nullptr) {
+ int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
+ CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+ }
}
private:
@@ -409,7 +540,7 @@
size_t code_size,
size_t data_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
+ const std::vector<Handle<mirror::Object>>& roots,
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
uint8_t* result = CommitCodeInternal(self,
@@ -475,18 +606,16 @@
return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
}
-static void DCheckRootsAreValid(Handle<mirror::ObjectArray<mirror::Object>> roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!kIsDebugBuild) {
return;
}
- const uint32_t length = roots->GetLength();
// Put all roots in `roots_data`.
- for (uint32_t i = 0; i < length; ++i) {
- ObjPtr<mirror::Object> object = roots->Get(i);
+ for (Handle<mirror::Object> object : roots) {
// Ensure the string is strongly interned. b/32995596
if (object->IsString()) {
- ObjPtr<mirror::String> str = ObjPtr<mirror::String>::DownCast(object);
+ ObjPtr<mirror::String> str = object->AsString();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
}
@@ -494,12 +623,12 @@
}
void JitCodeCache::FillRootTable(uint8_t* roots_data,
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
+ const std::vector<Handle<mirror::Object>>& roots) {
GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
- const uint32_t length = roots->GetLength();
+ const uint32_t length = roots.size();
// Put all roots in `roots_data`.
for (uint32_t i = 0; i < length; ++i) {
- ObjPtr<mirror::Object> object = roots->Get(i);
+ ObjPtr<mirror::Object> object = roots[i].Get();
gc_roots[i] = GcRoot<mirror::Object>(object);
}
}
@@ -528,7 +657,7 @@
// This does not need a read barrier because this is called by GC.
mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
if (cls != nullptr && cls != weak_sentinel) {
- DCHECK((cls->IsClass<kDefaultVerifyFlags, kWithoutReadBarrier>()));
+ DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
// Look at the classloader of the class to know if it has been unloaded.
// This does not need a read barrier because this is called by GC.
mirror::Object* class_loader =
@@ -596,7 +725,13 @@
if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
FreeData(GetRootTable(code_ptr));
} // else this is a JNI stub without any data.
- FreeCode(reinterpret_cast<uint8_t*>(allocation));
+
+ uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
+ if (HasDualCodeMapping()) {
+ code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
+ }
+
+ FreeCode(code_allocation);
}
void JitCodeCache::FreeAllMethodHeaders(
@@ -747,6 +882,16 @@
}
}
+const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
+ if (HasDualCodeMapping()) {
+ return &non_exec_pages_;
+ } else if (HasCodeMapping()) {
+ return &exec_pages_;
+ } else {
+ return nullptr;
+ }
+}
+
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
@@ -755,7 +900,7 @@
size_t code_size,
size_t data_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
+ const std::vector<Handle<mirror::Object>>& roots,
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>&
cha_single_implementation_list) {
@@ -767,47 +912,89 @@
DCheckRootsAreValid(roots);
}
- size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
- // Ensure the header ends up at expected instruction alignment.
- size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
- size_t total_size = header_size + code_size;
-
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
- uint8_t* memory = nullptr;
+
MutexLock mu(self, lock_);
// We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
// finish.
WaitForPotentialCollectionToCompleteRunnable(self);
{
ScopedCodeCacheWrite scc(this);
- memory = AllocateCode(total_size);
- if (memory == nullptr) {
+
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ // Ensure the header ends up at expected instruction alignment.
+ size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+ size_t total_size = header_size + code_size;
+
+ // AllocateCode allocates memory in the non-executable region for the method header and code. The
+ // header size may include alignment padding.
+ uint8_t* nox_memory = AllocateCode(total_size);
+ if (nox_memory == nullptr) {
return nullptr;
}
- code_ptr = memory + header_size;
+ // code_ptr points to non-executable code.
+ code_ptr = nox_memory + header_size;
std::copy(code, code + code_size, code_ptr);
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+
+ // From here code_ptr points to executable code.
+ if (HasDualCodeMapping()) {
+ code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
+ }
+
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
code_size);
- // Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
- // trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
- // This is a kernel bug that we need to work around until affected devices (e.g. Nexus 5X and
- // 6P) stop being supported or their kernels are fixed.
- //
- // For reference, this behavior is caused by this commit:
- // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
- FlushInstructionCache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + code_size));
+
DCHECK(!Runtime::Current()->IsAotCompiler());
if (has_should_deoptimize_flag) {
method_header->SetHasShouldDeoptimizeFlag();
}
+ // Update method_header pointer to executable code region.
+ if (HasDualCodeMapping()) {
+ method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
+ }
+
+ // Both instruction and data caches need flushing to the point of unification where both share
+ // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
+ // newly added code are written out to the point of unification. Flushing the instruction
+ // cache ensures the newly written code will be fetched from the point of unification before
+ // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
+ // prevent stale code from residing in the instruction cache.
+ //
+ // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
+ // may trigger a segfault if a page fault occurs when requesting a cache maintenance
+ // operation. This is a kernel bug that we need to work around until affected devices
+ // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
+ //
+ // For reference, this behavior is caused by this commit:
+ // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+ //
+ if (HasDualCodeMapping()) {
+ // Flush the data cache lines associated with the non-executable copy of the code just added.
+ FlushDataCache(nox_memory, nox_memory + total_size);
+ }
+ // FlushInstructionCache() flushes both data and instruction cache lines. The cacheline range
+ // flushed is for the executable mapping of the code just added.
+ FlushInstructionCache(code_ptr, code_ptr + code_size);
+
+ // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
+ // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
+ // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
+ // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
+ // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
+ // hardware support that broadcasts TLB invalidations and so their kernels have no software
+ // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
+ // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
+ // platforms lacking the appropriate support.
+ art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
+
number_of_compilations_++;
}
+
// We need to update the entry point in the runnable state for the instrumentation.
{
// The following needs to be guarded by cha_lock_ also. Otherwise it's possible that the
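
The flush-then-membarrier sequence above has a fixed order: write through the writable mapping, flush the affected cache lines, then broadcast a sync-core barrier. A compressed sketch of that order, substituting the GCC/Clang __builtin___clear_cache builtin and a raw membarrier(2) call for ART's FlushDataCache()/FlushInstructionCache()/art::membarrier() helpers; this is an approximation, not the exact maintenance the runtime performs:

    #include <cstdint>
    #include <cstring>
    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    void PublishCode(uint8_t* writable, uint8_t* executable, const uint8_t* code, size_t size) {
      // 1. Install the code through the writable view.
      memcpy(writable, code, size);
      // 2. Flush the data and instruction cache lines covering the executable
      //    view so instruction fetch sees the new code.
      __builtin___clear_cache(reinterpret_cast<char*>(executable),
                              reinterpret_cast<char*>(executable) + size);
      // 3. Make sure no core keeps stale instructions in its pipeline.
      syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
    }
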
@@ -858,8 +1045,7 @@
FillRootTable(roots_data, roots);
{
// Flush data cache, as compiled code references literals in it.
- FlushDataCache(reinterpret_cast<char*>(roots_data),
- reinterpret_cast<char*>(roots_data + data_size));
+ FlushDataCache(roots_data, roots_data + data_size);
}
method_code_map_.Put(code_ptr, method);
if (osr) {
@@ -1153,9 +1339,9 @@
DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
DCHECK_EQ(per_space_footprint * 2, new_footprint);
mspace_set_footprint_limit(data_mspace_, per_space_footprint);
- {
+ if (HasCodeMapping()) {
ScopedCodeCacheWrite scc(this);
- mspace_set_footprint_limit(code_mspace_, per_space_footprint);
+ mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
}
}
@@ -1230,8 +1416,8 @@
number_of_collections_++;
live_bitmap_.reset(CodeCacheBitmap::Create(
"code-cache-bitmap",
- reinterpret_cast<uintptr_t>(code_map_.Begin()),
- reinterpret_cast<uintptr_t>(code_map_.Begin() + current_capacity_ / 2)));
+ reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
+ reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
collection_in_progress_ = true;
}
}
@@ -1600,15 +1786,17 @@
// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
// is already held.
void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
- if (code_mspace_ == mspace) {
- size_t result = code_end_;
- code_end_ += increment;
- return reinterpret_cast<void*>(result + code_map_.Begin());
+ if (mspace == exec_mspace_) {
+ DCHECK(exec_mspace_ != nullptr);
+ const MemMap* const code_pages = GetUpdatableCodeMapping();
+ void* result = code_pages->Begin() + exec_end_;
+ exec_end_ += increment;
+ return result;
} else {
DCHECK_EQ(data_mspace_, mspace);
- size_t result = data_end_;
+ void* result = data_pages_.Begin() + data_end_;
data_end_ += increment;
- return reinterpret_cast<void*>(result + data_map_.Begin());
+ return result;
}
}
@@ -1835,7 +2023,7 @@
uint8_t* JitCodeCache::AllocateCode(size_t code_size) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
uint8_t* result = reinterpret_cast<uint8_t*>(
- mspace_memalign(code_mspace_, alignment, code_size));
+ mspace_memalign(exec_mspace_, alignment, code_size));
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
// Ensure the header ends up at expected instruction alignment.
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
@@ -1845,7 +2033,7 @@
void JitCodeCache::FreeCode(uint8_t* code) {
used_memory_for_code_ -= mspace_usable_size(code);
- mspace_free(code_mspace_, code);
+ mspace_free(exec_mspace_, code);
}
uint8_t* JitCodeCache::AllocateData(size_t data_size) {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a4a0f8f..76ad8db 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -141,7 +141,7 @@
size_t code_size,
size_t data_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
+ const std::vector<Handle<mirror::Object>>& roots,
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>& cha_single_implementation_list)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -223,7 +223,7 @@
REQUIRES_SHARED(Locks::mutator_lock_);
bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
- return mspace == code_mspace_ || mspace == data_mspace_;
+ return mspace == data_mspace_ || mspace == exec_mspace_;
}
void* MoreCore(const void* mspace, intptr_t increment);
@@ -279,13 +279,13 @@
private:
// Take ownership of maps.
- JitCodeCache(MemMap&& code_map,
- MemMap&& data_map,
- size_t initial_code_capacity,
+ JitCodeCache(MemMap&& data_pages,
+ MemMap&& exec_pages,
+ MemMap&& non_exec_pages,
size_t initial_data_capacity,
+ size_t initial_exec_capacity,
size_t max_capacity,
- bool garbage_collect_code,
- int memmap_flags_prot_code);
+ bool garbage_collect_code);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -297,14 +297,14 @@
size_t code_size,
size_t data_size,
bool osr,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
+ const std::vector<Handle<mirror::Object>>& roots,
bool has_should_deoptimize_flag,
const ArenaSet<ArtMethod*>& cha_single_implementation_list)
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Adds the given roots to the roots_data. Only a member for annotalysis.
- void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror::Object>> roots)
+ void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
REQUIRES(lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -381,6 +381,16 @@
uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
void FreeData(uint8_t* data) REQUIRES(lock_);
+ bool HasDualCodeMapping() const {
+ return non_exec_pages_.IsValid();
+ }
+
+ bool HasCodeMapping() const {
+ return exec_pages_.IsValid();
+ }
+
+ const MemMap* GetUpdatableCodeMapping() const;
+
bool IsWeakAccessEnabled(Thread* self) const;
void WaitUntilInlineCacheAccessible(Thread* self)
REQUIRES(!lock_)
@@ -395,14 +405,17 @@
ConditionVariable lock_cond_ GUARDED_BY(lock_);
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(lock_);
- // Mem map which holds code.
- MemMap code_map_;
// Mem map which holds data (stack maps and profiling info).
- MemMap data_map_;
- // The opaque mspace for allocating code.
- void* code_mspace_ GUARDED_BY(lock_);
+ MemMap data_pages_;
+ // Mem map which holds code and has executable permission.
+ MemMap exec_pages_;
+ // Mem map which holds code with non-executable permission. Only valid for dual view JIT, where
+ // it provides the non-executable view of the code used to write updates.
+ MemMap non_exec_pages_;
// The opaque mspace for allocating data.
void* data_mspace_ GUARDED_BY(lock_);
+ // The opaque mspace for allocating code.
+ void* exec_mspace_ GUARDED_BY(lock_);
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
// Holds compiled code associated with the shorty for a JNI stub.
@@ -420,12 +433,12 @@
// The current capacity in bytes of the code cache.
size_t current_capacity_ GUARDED_BY(lock_);
- // The current footprint in bytes of the code portion of the code cache.
- size_t code_end_ GUARDED_BY(lock_);
-
// The current footprint in bytes of the data portion of the code cache.
size_t data_end_ GUARDED_BY(lock_);
+ // The current footprint in bytes of the code portion of the code cache.
+ size_t exec_end_ GUARDED_BY(lock_);
+
// Whether the last collection round increased the code cache.
bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
@@ -464,9 +477,6 @@
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
- // Mapping flags for the code section.
- const int memmap_flags_prot_code_;
-
friend class art::JitJniStubTestHelper;
friend class ScopedCodeCacheWrite;
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index d9ef922..9043f26 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -274,7 +274,7 @@
: profile_boot_class_path_(profile_boot_class_path),
out_(out) {}
- virtual bool operator()(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
if (klass->IsProxyClass() ||
klass->IsArrayClass() ||
klass->IsPrimitive() ||
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index c5e8830..6f61f5e 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -181,7 +181,7 @@
}
}
- VarArgs(VarArgs&& other) {
+ VarArgs(VarArgs&& other) noexcept {
m_ = other.m_;
cnt_ = other.cnt_;
type_ = other.type_;
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index fdf0fee..6769368 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -86,7 +86,10 @@
self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
}
- android::CloseNativeLibrary(handle_, needs_native_bridge_);
+ std::string error_msg;
+ if (!android::CloseNativeLibrary(handle_, needs_native_bridge_, &error_msg)) {
+ LOG(WARNING) << "Error while unloading native library \"" << path_ << "\": " << error_msg;
+ }
}
jweak GetClassLoader() const {
@@ -330,7 +333,7 @@
}
ScopedThreadSuspension sts(self, kNative);
// Do this without holding the jni libraries lock to prevent possible deadlocks.
- typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
+ using JNI_OnUnloadFn = void(*)(JavaVM*, void*);
for (auto library : unload_libraries) {
void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
if (sym == nullptr) {
@@ -1023,7 +1026,7 @@
self->SetClassLoaderOverride(class_loader);
VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
- typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
+ using JNI_OnLoadFn = int(*)(JavaVM*, void*);
JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
int version = (*jni_on_load)(this, nullptr);
diff --git a/runtime/jni/java_vm_ext_test.cc b/runtime/jni/java_vm_ext_test.cc
index 4049c6e..dfe50cf 100644
--- a/runtime/jni/java_vm_ext_test.cc
+++ b/runtime/jni/java_vm_ext_test.cc
@@ -27,7 +27,7 @@
class JavaVmExtTest : public CommonRuntimeTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
vm_ = Runtime::Current()->GetJavaVM();
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 3040b90..4ad4c14 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -34,7 +34,7 @@
// TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
class JniInternalTest : public CommonCompilerTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonCompilerTest::SetUp();
vm_ = Runtime::Current()->GetJavaVM();
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index d1c230f..6a0f075 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -95,7 +95,7 @@
tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
}
- static size_t TaggedTopQuickFrameOffset() {
+ static constexpr size_t TaggedTopQuickFrameOffset() {
return OFFSETOF_MEMBER(ManagedStack, tagged_top_quick_frame_);
}
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index d489f14..9660bf0 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,7 +36,8 @@
}
private:
- uint8_t flag_;
+ // The field is only accessed indirectly, via the FlagOffset() method.
+ uint8_t flag_ ATTRIBUTE_UNUSED;
// Padding required for correct alignment of subclasses like Executable, Field, etc.
uint8_t padding_[1] ATTRIBUTE_UNUSED;
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 2e39530..704fb11 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -50,14 +50,6 @@
return header_size + data_size;
}
-inline MemberOffset Array::DataOffset(size_t component_size) {
- DCHECK(IsPowerOfTwo(component_size)) << component_size;
- size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
- DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
- << "Array data offset isn't aligned with component size";
- return MemberOffset(data_offset);
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline bool Array::CheckIsValidIndex(int32_t index) {
if (UNLIKELY(static_cast<uint32_t>(index) >=
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 8bdd561..7edc851 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "base/bit_utils.h"
#include "base/enums.h"
#include "gc/allocator_type.h"
#include "obj_ptr.h"
@@ -66,11 +67,17 @@
SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
}
- static MemberOffset LengthOffset() {
+ static constexpr MemberOffset LengthOffset() {
return OFFSET_OF_OBJECT_MEMBER(Array, length_);
}
- static MemberOffset DataOffset(size_t component_size);
+ static constexpr MemberOffset DataOffset(size_t component_size) {
+ DCHECK(IsPowerOfTwo(component_size)) << component_size;
+ size_t data_offset = RoundUp(OFFSETOF_MEMBER(Array, first_element_), component_size);
+ DCHECK_EQ(RoundUp(data_offset, component_size), data_offset)
+ << "Array data offset isn't aligned with component size";
+ return MemberOffset(data_offset);
+ }
void* GetRawData(size_t component_size, int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
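
The now-constexpr DataOffset() rounds the offset of first_element_ up to the component size so wider elements stay naturally aligned. The same arithmetic, shown with an illustrative 12-byte header offset (an assumption for the example, not a claim about ART's actual object layout):

    #include <cstddef>

    // Round up to a power-of-two boundary, in the spirit of base/bit_utils.h.
    constexpr size_t RoundUpTo(size_t x, size_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    static_assert(RoundUpTo(12, 4) == 12, "4-byte components need no padding");
    static_assert(RoundUpTo(12, 8) == 16, "8-byte components are padded out to 16");
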
@@ -102,9 +109,11 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// The number of array elements.
- int32_t length_;
+ // The field is only accessed indirectly, via the LengthOffset() method.
+ int32_t length_ ATTRIBUTE_UNUSED;
// Marker for the data (used by generated code)
- uint32_t first_element_[0];
+ // The field is only accessed indirectly, via the DataOffset() method.
+ uint32_t first_element_[0] ATTRIBUTE_UNUSED;
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};
@@ -189,8 +198,9 @@
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kVerifyNone>
void** ElementAddress(size_t index, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_LT(index, static_cast<size_t>(GetLength()));
+ DCHECK_LT(index, static_cast<size_t>(GetLength<kVerifyFlags>()));
return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(this) +
Array::DataOffset(static_cast<size_t>(ptr_size)).Uint32Value() +
static_cast<size_t>(ptr_size) * index);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 51dc1a4..df70fab 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -42,17 +42,17 @@
namespace art {
namespace mirror {
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline uint32_t Class::GetObjectSize() {
// Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
- DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf();
+ DCHECK((!IsVariableSize<kVerifyFlags>())) << "class=" << PrettyTypeOf();
return GetField32(ObjectSizeOffset());
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline uint32_t Class::GetObjectSizeAllocFastPath() {
// Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
- DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf();
+ DCHECK((!IsVariableSize<kVerifyFlags>())) << "class=" << PrettyTypeOf();
return GetField32(ObjectSizeAllocFastPathOffset());
}
@@ -251,18 +251,14 @@
uint32_t num_direct,
uint32_t num_virtual) {
DCHECK_LE(num_direct + num_virtual, (new_methods == nullptr) ? 0 : new_methods->size());
- SetMethodsPtrInternal(new_methods);
+ SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, methods_),
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(new_methods)));
SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, copied_methods_offset_),
dchecked_integral_cast<uint16_t>(num_direct + num_virtual));
SetFieldShort<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_offset_),
dchecked_integral_cast<uint16_t>(num_direct));
}
-inline void Class::SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods) {
- SetField64<false>(OFFSET_OF_OBJECT_MEMBER(Class, methods_),
- static_cast<uint64_t>(reinterpret_cast<uintptr_t>(new_methods)));
-}
-
template<VerifyObjectFlags kVerifyFlags>
inline ArtMethod* Class::GetVirtualMethod(size_t i, PointerSize pointer_size) {
CheckPointerSize(pointer_size);
@@ -304,7 +300,7 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline int32_t Class::GetVTableLength() {
- if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+ if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
return GetEmbeddedVTableLength();
}
return GetVTable<kVerifyFlags, kReadBarrierOption>() != nullptr ?
@@ -313,7 +309,7 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ArtMethod* Class::GetVTableEntry(uint32_t i, PointerSize pointer_size) {
- if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+ if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
return GetEmbeddedVTableEntry(i, pointer_size);
}
auto* vtable = GetVTable<kVerifyFlags, kReadBarrierOption>();
@@ -322,8 +318,9 @@
i, pointer_size);
}
+template<VerifyObjectFlags kVerifyFlags>
inline int32_t Class::GetEmbeddedVTableLength() {
- return GetField32(MemberOffset(EmbeddedVTableLengthOffset()));
+ return GetField32<kVerifyFlags>(MemberOffset(EmbeddedVTableLengthOffset()));
}
inline void Class::SetEmbeddedVTableLength(int32_t len) {
@@ -374,13 +371,13 @@
return false;
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Class::IsVariableSize() {
// Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
- return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
- IsArrayClass<kVerifyFlags, kReadBarrierOption>() ||
- IsStringClass();
+ return IsClassClass<kVerifyFlags>() ||
+ IsArrayClass<kVerifyFlags>() ||
+ IsStringClass<kVerifyFlags>();
}
inline void Class::SetObjectSize(uint32_t new_object_size) {
@@ -647,19 +644,18 @@
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
ObjPtr<Class> super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
return (super_class != nullptr)
- ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
- kHeapReferenceSize))
+ ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags>(), kHeapReferenceSize))
: ClassOffset();
}
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template <VerifyObjectFlags kVerifyFlags>
inline MemberOffset Class::GetFirstReferenceStaticFieldOffset(PointerSize pointer_size) {
- DCHECK(IsResolved());
+ DCHECK(IsResolved<kVerifyFlags>());
uint32_t base = sizeof(Class); // Static fields come after the class.
- if (ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>()) {
+ if (ShouldHaveEmbeddedVTable<kVerifyFlags>()) {
// Static fields come after the embedded tables.
base = Class::ComputeClassSize(
- true, GetEmbeddedVTableLength(), 0, 0, 0, 0, 0, pointer_size);
+ true, GetEmbeddedVTableLength<kVerifyFlags>(), 0, 0, 0, 0, 0, pointer_size);
}
return MemberOffset(base);
}
@@ -853,10 +849,11 @@
return size;
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Class::IsClassClass() {
- ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
- template GetClass<kVerifyFlags, kReadBarrierOption>();
+ // OK to look at from-space copies since java.lang.Class.class is not movable.
+ // See b/114413743
+ ObjPtr<Class> java_lang_Class = GetClass<kVerifyFlags, kWithoutReadBarrier>();
return this == java_lang_Class;
}
@@ -1003,7 +1000,6 @@
}
inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
- CheckPointerSize(pointer_size);
return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
}
@@ -1016,15 +1012,18 @@
return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ComponentTypeOffset());
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Class::IsArrayClass() {
- return GetComponentType<kVerifyFlags, kReadBarrierOption>() != nullptr;
+ // We do not need a read barrier for comparing with null.
+ return GetComponentType<kVerifyFlags, kWithoutReadBarrier>() != nullptr;
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Class::IsObjectArrayClass() {
- ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
- return component_type != nullptr && !component_type->IsPrimitive();
+ // We do not need a read barrier here: the primitive type is constant, so the
+ // from-space and to-space copies of the component type class yield the same result.
+ ObjPtr<Class> const component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
+ return component_type != nullptr && !component_type->IsPrimitive<kVerifyFlags>();
}
inline bool Class::IsAssignableFrom(ObjPtr<Class> src) {
@@ -1066,49 +1065,42 @@
return arr != nullptr ? arr->size() : 0u;
}
-template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
+template <typename T, VerifyObjectFlags kVerifyFlags, typename Visitor>
+inline void Class::FixupNativePointer(
+ Class* dest, PointerSize pointer_size, const Visitor& visitor, MemberOffset member_offset) {
+ void** address =
+ reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) + member_offset.Uint32Value());
+ T old_value = GetFieldPtrWithSize<T, kVerifyFlags>(member_offset, pointer_size);
+ T new_value = visitor(old_value, address);
+ if (old_value != new_value) {
+ dest->SetFieldPtrWithSize</* kTransactionActive */ false,
+ /* kCheckTransaction */ true,
+ kVerifyNone>(member_offset, new_value, pointer_size);
+ }
+}
+
+template <VerifyObjectFlags kVerifyFlags, typename Visitor>
inline void Class::FixupNativePointers(Class* dest,
PointerSize pointer_size,
const Visitor& visitor) {
- auto dest_address_fn = [dest](MemberOffset offset) {
- return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) + offset.Uint32Value());
- };
// Update the field arrays.
- LengthPrefixedArray<ArtField>* const sfields = GetSFieldsPtr();
- void** sfields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
- LengthPrefixedArray<ArtField>* const new_sfields = visitor(sfields, sfields_dest_address);
- if (sfields != new_sfields) {
- dest->SetSFieldsPtrUnchecked(new_sfields);
- }
- LengthPrefixedArray<ArtField>* const ifields = GetIFieldsPtr();
- void** ifields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
- LengthPrefixedArray<ArtField>* const new_ifields = visitor(ifields, ifields_dest_address);
- if (ifields != new_ifields) {
- dest->SetIFieldsPtrUnchecked(new_ifields);
- }
+ FixupNativePointer<LengthPrefixedArray<ArtField>*, kVerifyFlags>(
+ dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
+ FixupNativePointer<LengthPrefixedArray<ArtField>*, kVerifyFlags>(
+ dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
// Update method array.
- LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- void** methods_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, methods_));
- LengthPrefixedArray<ArtMethod>* new_methods = visitor(methods, methods_dest_address);
- if (methods != new_methods) {
- dest->SetMethodsPtrInternal(new_methods);
- }
+ FixupNativePointer<LengthPrefixedArray<ArtMethod>*, kVerifyFlags>(
+ dest, pointer_size, visitor, OFFSET_OF_OBJECT_MEMBER(Class, methods_));
// Fix up embedded tables.
- if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
- for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
- ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
- void** method_dest_addr = dest_address_fn(EmbeddedVTableEntryOffset(i, pointer_size));
- ArtMethod* new_method = visitor(method, method_dest_addr);
- if (method != new_method) {
- dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
- }
+ if (!IsTemp<kVerifyNone>() && ShouldHaveEmbeddedVTable<kVerifyNone>()) {
+ for (int32_t i = 0, count = GetEmbeddedVTableLength<kVerifyFlags>(); i < count; ++i) {
+ FixupNativePointer<ArtMethod*, kVerifyFlags>(
+ dest, pointer_size, visitor, EmbeddedVTableEntryOffset(i, pointer_size));
}
}
- if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
- ImTable* imt = GetImt(pointer_size);
- void** imt_dest_addr = dest_address_fn(ImtPtrOffset(pointer_size));
- ImTable* new_imt = visitor(imt, imt_dest_addr);
- dest->SetImt(new_imt, pointer_size);
+ if (!IsTemp<kVerifyNone>() && ShouldHaveImt<kVerifyNone>()) {
+ FixupNativePointer<ImTable*, kVerifyFlags>(
+ dest, pointer_size, visitor, ImtPtrOffset(pointer_size));
}
}
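
The FixupNativePointers() rewrite above folds the three hand-written fixup stanzas into one templated FixupNativePointer() helper: read the native pointer, run the visitor, and store into the destination copy only when the value actually changed. A minimal standalone sketch of that read-visit-conditional-write pattern, with hypothetical names rather than the ART accessors:

#include <cstddef>
#include <cstdint>

// Sketch only: read the pointer from the source copy, let the visitor compute the
// relocated value, and write into the destination copy only on change so that
// untouched pages are not dirtied.
template <typename T, typename Visitor>
void FixupPointerAt(const void* src, void* dest, size_t offset, const Visitor& visitor) {
  T* const* src_slot =
      reinterpret_cast<T* const*>(reinterpret_cast<uintptr_t>(src) + offset);
  T** dest_slot = reinterpret_cast<T**>(reinterpret_cast<uintptr_t>(dest) + offset);
  T* old_value = *src_slot;
  T* new_value = visitor(old_value, reinterpret_cast<void**>(dest_slot));
  if (old_value != new_value) {
    *dest_slot = new_value;
  }
}

The loop over the embedded vtable entries in the hunk above simply calls the helper once per slot instead of repeating this logic inline.
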
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 811ee51..eddc84b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -89,7 +89,7 @@
static void SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static MemberOffset StatusOffset() {
+ static constexpr MemberOffset StatusOffset() {
return MemberOffset(OFFSET_OF_OBJECT_MEMBER(Class, status_));
}
@@ -173,7 +173,7 @@
return GetField32<kVerifyFlags>(AccessFlagsOffset());
}
- static MemberOffset AccessFlagsOffset() {
+ static constexpr MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
@@ -191,8 +191,9 @@
}
// Returns true if the class is an interface.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsInterface() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccInterface) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
@@ -235,24 +236,27 @@
SetAccessFlags(flags | kAccClassIsFinalizable);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetClassFlags() & kClassFlagString) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagString) != 0;
}
ALWAYS_INLINE void SetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetClassFlags() == kClassFlagClassLoader;
+ return GetClassFlags<kVerifyFlags>() == kClassFlagClassLoader;
}
ALWAYS_INLINE void SetClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagClassLoader);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetClassFlags() & kClassFlagDexCache) != 0;
+ return (GetClassFlags<kVerifyFlags>() & kClassFlagDexCache) != 0;
}
ALWAYS_INLINE void SetDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -260,8 +264,9 @@
}
// Returns true if the class is abstract.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsAbstract() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccAbstract) != 0;
+ return (GetAccessFlags<kVerifyFlags>() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
@@ -324,11 +329,12 @@
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassStatus s = GetStatus();
+ ClassStatus s = GetStatus<kVerifyFlags>();
return s < ClassStatus::kResolving &&
s != ClassStatus::kErrorResolved &&
- ShouldHaveEmbeddedVTable();
+ ShouldHaveEmbeddedVTable<kVerifyFlags>();
}
String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
@@ -346,7 +352,7 @@
return (access_flags & kAccClassIsProxy) != 0;
}
- static MemberOffset PrimitiveTypeOffset() {
+ static constexpr MemberOffset PrimitiveTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_);
}
@@ -426,17 +432,15 @@
// Depth of class from java.lang.Object
uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsClassClass() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset ComponentTypeOffset() {
+ static constexpr MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
}
@@ -469,15 +473,15 @@
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_) {
- return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
- (IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
+ return (!IsPrimitive<kVerifyFlags>() &&
+ !IsInterface<kVerifyFlags>() &&
+ !IsAbstract<kVerifyFlags>()) ||
+ (IsAbstract<kVerifyFlags>() && IsArrayClass<kVerifyFlags>());
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -504,8 +508,7 @@
ObjPtr<Object> AllocNonMovableObject(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool IsVariableSize() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -544,13 +547,12 @@
return ComputeClassSize(false, 0, 0, 0, 0, 0, 0, pointer_size);
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset ObjectSizeOffset() {
+ static constexpr MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
- static MemberOffset ObjectSizeAllocFastPathOffset() {
+ static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
}
@@ -558,8 +560,7 @@
void SetObjectSizeAllocFastPath(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetObjectSizeAllocFastPath() REQUIRES_SHARED(Locks::mutator_lock_);
void SetObjectSizeWithoutChecks(uint32_t new_object_size)
@@ -635,7 +636,7 @@
return GetSuperClass() != nullptr;
}
- static MemberOffset SuperClassOffset() {
+ static constexpr MemberOffset SuperClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
@@ -645,11 +646,11 @@
void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset DexCacheOffset() {
+ static constexpr MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
}
- static MemberOffset IfTableOffset() {
+ static constexpr MemberOffset IfTableOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, iftable_));
}
@@ -674,7 +675,7 @@
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset MethodsOffset() {
+ static constexpr MemberOffset MethodsOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
@@ -783,30 +784,28 @@
void SetVTable(ObjPtr<PointerArray> new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
- static MemberOffset VTableOffset() {
+ static constexpr MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
}
- static MemberOffset EmbeddedVTableLengthOffset() {
+ static constexpr MemberOffset EmbeddedVTableLengthOffset() {
return MemberOffset(sizeof(Class));
}
- static MemberOffset ImtPtrOffset(PointerSize pointer_size) {
+ static constexpr MemberOffset ImtPtrOffset(PointerSize pointer_size) {
return MemberOffset(
RoundUp(EmbeddedVTableLengthOffset().Uint32Value() + sizeof(uint32_t),
static_cast<size_t>(pointer_size)));
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_) {
- return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>();
+ return ShouldHaveEmbeddedVTable<kVerifyFlags>();
}
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool ShouldHaveEmbeddedVTable() REQUIRES_SHARED(Locks::mutator_lock_) {
- return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
+ return IsInstantiable<kVerifyFlags>();
}
bool HasVTable() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -822,6 +821,7 @@
ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int32_t GetEmbeddedVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
void SetEmbeddedVTableLength(int32_t len) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -977,9 +977,10 @@
// Returns the number of instance fields containing reference types. Does not count fields in any
// super classes.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(IsResolved());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
+ DCHECK(IsResolved<kVerifyFlags>());
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
uint32_t NumReferenceInstanceFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1005,9 +1006,10 @@
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(IsResolved());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
+ DCHECK(IsResolved<kVerifyFlags>());
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
uint32_t NumReferenceStaticFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1021,8 +1023,7 @@
}
// Get the offset of the first reference static field. Other reference static fields follow.
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1262,14 +1263,14 @@
// the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
// initialized to a copy of *this to prevent issues. Does not visit the ArtMethod and ArtField
// roots.
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
- typename Visitor>
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename Visitor>
void FixupNativePointers(Class* dest, PointerSize pointer_size, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
+ template <typename T, VerifyObjectFlags kVerifyFlags, typename Visitor>
+ void FixupNativePointer(
+ Class* dest, PointerSize pointer_size, const Visitor& visitor, MemberOffset member_offset)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE static ArraySlice<ArtMethod> GetMethodsSliceRangeUnchecked(
@@ -1426,6 +1427,7 @@
// Tid used to check for recursive <clinit> invocation.
pid_t clinit_thread_id_;
+ static_assert(sizeof(pid_t) == sizeof(int32_t), "java.lang.Class.clinitThreadId size check");
// ClassDef index in dex file, -1 if no class definition such as an array.
// TODO: really 16bits
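
Most of the class.h churn above is the same two mechanical changes: accessors grow a kVerifyFlags template parameter defaulted to kDefaultVerifyFlags so hot GC paths can opt out of verification, and MemberOffset getters become constexpr so layout assumptions can move to compile time. A hedged sketch of both patterns with made-up names (not the ART classes):

#include <cassert>
#include <cstddef>
#include <cstdint>

enum VerifyFlags { kVerifyNone = 0, kVerifyThis = 1 };
constexpr VerifyFlags kDefaultVerify = kVerifyThis;

// Stand-in for the real verification hook.
inline void VerifyObject(const void* obj) { assert(obj != nullptr); }

struct FakeClass {
  uint32_t access_flags;
  int32_t clinit_thread_id;  // mirrors the field the new static_assert pins to 32 bits

  static constexpr size_t AccessFlagsOffset() { return offsetof(FakeClass, access_flags); }

  template <VerifyFlags kFlags = kDefaultVerify>
  bool IsInterface() const {
    if (kFlags != kVerifyNone) {
      VerifyObject(this);  // skipped entirely when the caller asks for kVerifyNone
    }
    return (access_flags & 0x0200u) != 0;  // kAccInterface-style bit
  }
};

// Because the offset is constexpr, layout assumptions become compile-time checks.
static_assert(FakeClass::AccessFlagsOffset() == 0, "access_flags is the first field");

Ordinary callers keep writing obj.IsInterface(); collector-internal code that has already validated the object writes obj.IsInterface<kVerifyNone>(), which is how the defaults above are meant to be used.
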
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index 39c8ee0..64b4e74 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -33,7 +33,7 @@
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
if (kVisitClasses) {
// Visit classes loaded after.
- ClassTable* const class_table = GetClassTable();
+ ClassTable* const class_table = GetClassTable<kVerifyFlags>();
if (class_table != nullptr) {
class_table->VisitRoots(visitor);
}
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index f25f18f..e3cb12f 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -44,9 +44,10 @@
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ClassTable* GetClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ClassTable*>(
- GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
+ GetField64<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
}
void SetClassTable(ClassTable* class_table) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index bbe15ac..6efb747 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -310,16 +310,17 @@
// Visit arrays after.
if (kVisitNativeRoots) {
VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
- GetStrings(), NumStrings(), visitor);
+ GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
- GetResolvedTypes(), NumResolvedTypes(), visitor);
+ GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
- GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
+ GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
- GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites();
- for (size_t i = 0, num_call_sites = NumResolvedCallSites(); i != num_call_sites; ++i) {
+ GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
+ size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
+ for (size_t i = 0; i != num_call_sites; ++i) {
visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 8401b66..da1cd3f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -226,51 +226,51 @@
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
- static MemberOffset StringsOffset() {
+ static constexpr MemberOffset StringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
- static MemberOffset ResolvedTypesOffset() {
+ static constexpr MemberOffset ResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_);
}
- static MemberOffset ResolvedFieldsOffset() {
+ static constexpr MemberOffset ResolvedFieldsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_fields_);
}
- static MemberOffset ResolvedMethodsOffset() {
+ static constexpr MemberOffset ResolvedMethodsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_methods_);
}
- static MemberOffset ResolvedMethodTypesOffset() {
+ static constexpr MemberOffset ResolvedMethodTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_method_types_);
}
- static MemberOffset ResolvedCallSitesOffset() {
+ static constexpr MemberOffset ResolvedCallSitesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_call_sites_);
}
- static MemberOffset NumStringsOffset() {
+ static constexpr MemberOffset NumStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_strings_);
}
- static MemberOffset NumResolvedTypesOffset() {
+ static constexpr MemberOffset NumResolvedTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_types_);
}
- static MemberOffset NumResolvedFieldsOffset() {
+ static constexpr MemberOffset NumResolvedFieldsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_fields_);
}
- static MemberOffset NumResolvedMethodsOffset() {
+ static constexpr MemberOffset NumResolvedMethodsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_methods_);
}
- static MemberOffset NumResolvedMethodTypesOffset() {
+ static constexpr MemberOffset NumResolvedMethodTypesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
}
- static MemberOffset NumResolvedCallSitesOffset() {
+ static constexpr MemberOffset NumResolvedCallSitesOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_call_sites_);
}
@@ -326,16 +326,18 @@
ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
+ return GetFieldPtr64<StringDexCacheType*, kVerifyFlags>(StringsOffset());
}
void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(StringsOffset(), strings);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
+ return GetFieldPtr<TypeDexCacheType*, kVerifyFlags>(ResolvedTypesOffset());
}
void SetResolvedTypes(TypeDexCacheType* resolved_types)
@@ -364,9 +366,10 @@
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
MethodTypeDexCacheType* GetResolvedMethodTypes()
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr64<MethodTypeDexCacheType*>(ResolvedMethodTypesOffset());
+ return GetFieldPtr64<MethodTypeDexCacheType*, kVerifyFlags>(ResolvedMethodTypesOffset());
}
void SetResolvedMethodTypes(MethodTypeDexCacheType* resolved_method_types)
@@ -375,10 +378,11 @@
SetFieldPtr<false>(ResolvedMethodTypesOffset(), resolved_method_types);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
GcRoot<CallSite>* GetResolvedCallSites()
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<GcRoot<CallSite>*>(ResolvedCallSitesOffset());
+ return GetFieldPtr<GcRoot<CallSite>*, kVerifyFlags>(ResolvedCallSitesOffset());
}
void SetResolvedCallSites(GcRoot<CallSite>* resolved_call_sites)
@@ -387,28 +391,34 @@
SetFieldPtr<false>(ResolvedCallSitesOffset(), resolved_call_sites);
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumStringsOffset());
+ return GetField32<kVerifyFlags>(NumStringsOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumResolvedTypesOffset());
+ return GetField32<kVerifyFlags>(NumResolvedTypesOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumResolvedMethodsOffset());
+ return GetField32<kVerifyFlags>(NumResolvedMethodsOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumResolvedFieldsOffset());
+ return GetField32<kVerifyFlags>(NumResolvedFieldsOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedMethodTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumResolvedMethodTypesOffset());
+ return GetField32<kVerifyFlags>(NumResolvedMethodTypesOffset());
}
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
size_t NumResolvedCallSites() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetField32(NumResolvedCallSitesOffset());
+ return GetField32<kVerifyFlags>(NumResolvedCallSitesOffset());
}
const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/executable.cc b/runtime/mirror/executable.cc
index fac3319..24e2047 100644
--- a/runtime/mirror/executable.cc
+++ b/runtime/mirror/executable.cc
@@ -38,18 +38,6 @@
template bool Executable::CreateFromArtMethod<PointerSize::k64, false>(ArtMethod* method);
template bool Executable::CreateFromArtMethod<PointerSize::k64, true>(ArtMethod* method);
-ArtMethod* Executable::GetArtMethod() {
- return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
-}
-
-template <bool kTransactionActive>
-void Executable::SetArtMethod(ArtMethod* method) {
- SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
-}
-
-template void Executable::SetArtMethod<false>(ArtMethod* method);
-template void Executable::SetArtMethod<true>(ArtMethod* method);
-
mirror::Class* Executable::GetDeclaringClass() {
return GetFieldObject<mirror::Class>(DeclaringClassOffset());
}
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index bf66d79..14c9d4c 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -18,7 +18,7 @@
#define ART_RUNTIME_MIRROR_EXECUTABLE_H_
#include "accessible_object.h"
-#include "object.h"
+#include "object-inl.h"
#include "read_barrier_option.h"
namespace art {
@@ -36,10 +36,19 @@
bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- // Only used by the image writer.
- template <bool kTransactionActive = false>
- void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return reinterpret_cast64<ArtMethod*>(GetField64<kVerifyFlags>(ArtMethodOffset()));
+ }
+
+ template <bool kTransactionActive = false,
+ bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+ ArtMethodOffset(), reinterpret_cast64<uint64_t>(method));
+ }
+
mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset ArtMethodOffset() {
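
GetArtMethod()/SetArtMethod() now live in the header and round-trip the ArtMethod pointer through the 64-bit field with reinterpret_cast64 instead of a plain reinterpret_cast. The helpers below illustrate what such a round-trip has to guarantee on 32-bit and 64-bit builds; they are stand-ins, not the definitions from base/casts.h:

#include <cassert>
#include <cstdint>

template <typename T>
uint64_t PointerToField64(T* ptr) {
  // Widen through uintptr_t so a 32-bit pointer is zero-extended, never sign-extended.
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr));
}

template <typename T>
T* Field64ToPointer(uint64_t value) {
  // On a 32-bit build the stored value must fit back into a pointer.
  assert(static_cast<uint64_t>(static_cast<uintptr_t>(value)) == value);
  return reinterpret_cast<T*>(static_cast<uintptr_t>(value));
}
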
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index 9e3c9af..3d4c5a7 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -39,9 +39,15 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ PointerArray* GetMethodArrayOrNull(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
+ return down_cast<PointerArray*>(
+ Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
+ }
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
PointerArray* GetMethodArray(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* method_array = down_cast<PointerArray*>(Get<kVerifyFlags, kReadBarrierOption>(
- (i * kMax) + kMethodArray));
+ PointerArray* method_array = GetMethodArrayOrNull<kVerifyFlags, kReadBarrierOption>(i);
DCHECK(method_array != nullptr);
return method_array;
}
@@ -49,9 +55,8 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
size_t GetMethodArrayCount(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* method_array = down_cast<PointerArray*>(
- Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
- return method_array == nullptr ? 0u : method_array->GetLength();
+ PointerArray* method_array = GetMethodArrayOrNull<kVerifyFlags, kReadBarrierOption>(i);
+ return method_array == nullptr ? 0u : method_array->GetLength<kVerifyFlags>();
}
void SetMethodArray(int32_t i, ObjPtr<PointerArray> arr) REQUIRES_SHARED(Locks::mutator_lock_);
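
The IfTable change factors the shared lookup into GetMethodArrayOrNull() so the DCHECK-ing getter and the count helper no longer duplicate the cast. The same layering in a self-contained sketch (hypothetical types, std::vector standing in for PointerArray):

#include <cassert>
#include <cstddef>
#include <vector>

struct Table {
  std::vector<std::vector<int>*> method_arrays;

  std::vector<int>* GetMethodArrayOrNull(size_t i) const {
    return i < method_arrays.size() ? method_arrays[i] : nullptr;
  }

  std::vector<int>& GetMethodArray(size_t i) const {
    std::vector<int>* arr = GetMethodArrayOrNull(i);
    assert(arr != nullptr);  // checked variant: callers promise the array exists
    return *arr;
  }

  size_t GetMethodArrayCount(size_t i) const {
    std::vector<int>* arr = GetMethodArrayOrNull(i);
    return arr == nullptr ? 0u : arr->size();  // tolerant variant: empty when absent
  }
};
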
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index bd89907..fbe002a 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -137,38 +137,42 @@
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsClass() {
- constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
- template GetClass<kVerifyFlags, kReadBarrierOption>();
- return GetClass<kNewFlags, kReadBarrierOption>() == java_lang_Class;
+ // OK to look at from-space copies since java.lang.Class.class is not movable.
+ // See b/114413743
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kWithoutReadBarrier>();
+ ObjPtr<Class> java_lang_Class = klass->GetClass<kVerifyFlags, kWithoutReadBarrier>();
+ return klass == java_lang_Class;
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline Class* Object::AsClass() {
- DCHECK((IsClass<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((IsClass<kVerifyFlags>()));
return down_cast<Class*>(this);
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsObjectArray() {
+ // We do not need a read barrier here: the primitive type is constant, so the
+ // from-space and to-space copies of the component type class yield the same result.
constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- return IsArrayInstance<kVerifyFlags, kReadBarrierOption>() &&
- !GetClass<kNewFlags, kReadBarrierOption>()->
- template GetComponentType<kNewFlags, kReadBarrierOption>()->IsPrimitive();
+ return IsArrayInstance<kVerifyFlags>() &&
+ !GetClass<kNewFlags, kWithoutReadBarrier>()->
+ template GetComponentType<kNewFlags, kWithoutReadBarrier>()->IsPrimitive();
}
-template<class T, VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<class T, VerifyObjectFlags kVerifyFlags>
inline ObjectArray<T>* Object::AsObjectArray() {
- DCHECK((IsObjectArray<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((IsObjectArray<kVerifyFlags>()));
return down_cast<ObjectArray<T>*>(this);
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsArrayInstance() {
- return GetClass<kVerifyFlags, kReadBarrierOption>()->
- template IsArrayClass<kVerifyFlags, kReadBarrierOption>();
+ // We do not need a read barrier here: both the from-space and to-space copies of the
+ // class return the same result from IsArrayClass().
+ return GetClass<kVerifyFlags, kWithoutReadBarrier>()->template IsArrayClass<kVerifyFlags>();
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -182,9 +186,9 @@
return down_cast<Reference*>(this);
}
-template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+template<VerifyObjectFlags kVerifyFlags>
inline Array* Object::AsArray() {
- DCHECK((IsArrayInstance<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((IsArrayInstance<kVerifyFlags>()));
return down_cast<Array*>(this);
}
@@ -348,14 +352,14 @@
static constexpr ReadBarrierOption kRBO = kWithoutReadBarrier;
size_t result;
constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
- if (IsArrayInstance<kVerifyFlags, kRBO>()) {
- result = AsArray<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
- } else if (IsClass<kNewFlags, kRBO>()) {
- result = AsClass<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
+ if (IsArrayInstance<kVerifyFlags>()) {
+ result = AsArray<kNewFlags>()->template SizeOf<kNewFlags, kRBO>();
+ } else if (IsClass<kNewFlags>()) {
+ result = AsClass<kNewFlags>()->template SizeOf<kNewFlags, kRBO>();
} else if (GetClass<kNewFlags, kRBO>()->IsStringClass()) {
result = AsString<kNewFlags, kRBO>()->template SizeOf<kNewFlags>();
} else {
- result = GetClass<kNewFlags, kRBO>()->template GetObjectSize<kNewFlags, kRBO>();
+ result = GetClass<kNewFlags, kRBO>()->template GetObjectSize<kNewFlags>();
}
DCHECK_GE(result, sizeof(Object)) << " class=" << Class::PrettyClass(GetClass<kNewFlags, kRBO>());
return result;
@@ -364,7 +368,7 @@
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline int8_t Object::GetFieldByte(MemberOffset field_offset) {
Verify<kVerifyFlags>();
- return GetField<int8_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<int8_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags>
@@ -391,7 +395,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<uint8_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<uint8_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive,
@@ -407,7 +411,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<int8_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<int8_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -425,13 +429,13 @@
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline uint16_t Object::GetFieldChar(MemberOffset field_offset) {
Verify<kVerifyFlags>();
- return GetField<uint16_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<uint16_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline int16_t Object::GetFieldShort(MemberOffset field_offset) {
Verify<kVerifyFlags>();
- return GetField<int16_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<int16_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags>
@@ -457,7 +461,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<uint16_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<uint16_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive,
@@ -473,7 +477,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<int16_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<int16_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -501,7 +505,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<int32_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<int32_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -531,7 +535,7 @@
kIsVolatile);
}
Verify<kVerifyFlags>();
- SetField<int64_t, kIsVolatile>(field_offset, new_value);
+ SetFieldPrimitive<int64_t, kIsVolatile>(field_offset, new_value);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -584,6 +588,10 @@
return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
}
+/*
+ * Returns a pointer to an object representing what the field points to, not an
+ * object representing the field.
+ */
template<class T,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
@@ -867,7 +875,7 @@
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
for (ObjPtr<Class> klass = kIsStatic
- ? AsClass<kVerifyFlags, kReadBarrierOption>()
+ ? AsClass<kVerifyFlags>()
: GetClass<kVerifyFlags, kReadBarrierOption>();
klass != nullptr;
klass = kIsStatic ? nullptr : klass->GetSuperClass<kVerifyFlags, kReadBarrierOption>()) {
@@ -879,7 +887,7 @@
// Presumably GC can happen when we are cross compiling, it should not cause performance
// problems to do pointer size logic.
MemberOffset field_offset = kIsStatic
- ? klass->GetFirstReferenceStaticFieldOffset<kVerifyFlags, kReadBarrierOption>(
+ ? klass->GetFirstReferenceStaticFieldOffset<kVerifyFlags>(
Runtime::Current()->GetClassLinker()->GetImagePointerSize())
: klass->GetFirstReferenceInstanceFieldOffset<kVerifyFlags, kReadBarrierOption>();
for (size_t i = 0u; i < num_reference_fields; ++i) {
@@ -902,13 +910,13 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption, typename Visitor>
inline void Object::VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) {
- DCHECK(!klass->IsTemp());
+ DCHECK(!klass->IsTemp<kVerifyFlags>());
klass->VisitFieldsReferences<true, kVerifyFlags, kReadBarrierOption>(0, visitor);
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsClassLoader() {
- return GetClass<kVerifyFlags, kReadBarrierOption>()->IsClassLoaderClass();
+ return GetClass<kVerifyFlags, kReadBarrierOption>()->template IsClassLoaderClass<kVerifyFlags>();
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -919,7 +927,7 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsDexCache() {
- return GetClass<kVerifyFlags, kReadBarrierOption>()->IsDexCacheClass();
+ return GetClass<kVerifyFlags, kReadBarrierOption>()->template IsDexCacheClass<kVerifyFlags>();
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index cc375bd..8689e4d 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -131,7 +131,7 @@
UNREACHABLE();
}
DCHECK(kUseBakerReadBarrier);
- LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
+ LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index 39e32bf..f0bee5a 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -33,27 +33,27 @@
typename JavaLangRefVisitor>
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
+ visitor(this, ClassOffset(), /* is_static= */ false);
ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
- visitor(this, ClassOffset(), false);
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
if (LIKELY(class_flags == kClassFlagNormal)) {
- DCHECK((!klass->IsVariableSize<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((!klass->IsVariableSize<kVerifyFlags>()));
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
- DCHECK((!klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- DCHECK(!klass->IsStringClass());
- DCHECK(!klass->IsClassLoaderClass());
- DCHECK((!klass->IsArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ DCHECK((!klass->IsClassClass<kVerifyFlags>()));
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
+ DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
+ DCHECK((!klass->IsArrayClass<kVerifyFlags>()));
} else {
if ((class_flags & kClassFlagNoReferenceFields) == 0) {
- DCHECK(!klass->IsStringClass());
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
if (class_flags == kClassFlagClass) {
- DCHECK((klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- ObjPtr<Class> as_klass = AsClass<kVerifyNone, kReadBarrierOption>();
+ DCHECK((klass->IsClassClass<kVerifyFlags>()));
+ ObjPtr<Class> as_klass = AsClass<kVerifyNone>();
as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
visitor);
} else if (class_flags == kClassFlagObjectArray) {
- DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
- AsObjectArray<mirror::Object, kVerifyNone, kReadBarrierOption>()->VisitReferences(visitor);
+ DCHECK((klass->IsObjectArrayClass<kVerifyFlags>()));
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
} else if ((class_flags & kClassFlagReference) != 0) {
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
ref_visitor(klass, AsReference<kVerifyFlags, kReadBarrierOption>());
@@ -69,15 +69,16 @@
kReadBarrierOption>(klass, visitor);
}
} else if (kIsDebugBuild) {
- CHECK((!klass->IsClassClass<kVerifyFlags, kReadBarrierOption>()));
- CHECK((!klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ CHECK((!klass->IsClassClass<kVerifyFlags>()));
+ CHECK((!klass->IsObjectArrayClass<kVerifyFlags>()));
// String still has instance fields for reflection purposes but these don't exist in
// actual string instances.
- if (!klass->IsStringClass()) {
+ if (!klass->IsStringClass<kVerifyFlags>()) {
size_t total_reference_instance_fields = 0;
ObjPtr<Class> super_class = klass;
do {
- total_reference_instance_fields += super_class->NumReferenceInstanceFields();
+ total_reference_instance_fields +=
+ super_class->NumReferenceInstanceFields<kVerifyFlags>();
super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
} while (super_class != nullptr);
// The only reference field should be the object's class. This field is handled at the
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 47aded3..11e8cca 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -86,7 +86,7 @@
return sizeof(Object);
}
- static MemberOffset ClassOffset() {
+ static constexpr MemberOffset ClassOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
}
@@ -138,7 +138,7 @@
REQUIRES(!Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
- static MemberOffset MonitorOffset() {
+ static constexpr MemberOffset MonitorOffset() {
return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
}
@@ -169,19 +169,14 @@
void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
- template<class T,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -198,11 +193,9 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
- ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -349,11 +342,35 @@
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<typename kType, bool kIsVolatile>
+ ALWAYS_INLINE void SetFieldPrimitive(MemberOffset field_offset, kType new_value)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
+ kType* addr = reinterpret_cast<kType*>(raw_addr);
+ if (kIsVolatile) {
+ reinterpret_cast<Atomic<kType>*>(addr)->store(new_value, std::memory_order_seq_cst);
+ } else {
+ reinterpret_cast<Atomic<kType>*>(addr)->StoreJavaData(new_value);
+ }
+ }
+
+ template<typename kType, bool kIsVolatile>
+ ALWAYS_INLINE kType GetFieldPrimitive(MemberOffset field_offset)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+ const kType* addr = reinterpret_cast<const kType*>(raw_addr);
+ if (kIsVolatile) {
+ return reinterpret_cast<const Atomic<kType>*>(addr)->load(std::memory_order_seq_cst);
+ } else {
+ return reinterpret_cast<const Atomic<kType>*>(addr)->LoadJavaData();
+ }
+ }
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
Verify<kVerifyFlags>();
- return GetField<uint8_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<uint8_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -440,7 +457,7 @@
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
Verify<kVerifyFlags>();
- return GetField<int32_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<int32_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -482,7 +499,7 @@
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
Verify<kVerifyFlags>();
- return GetField<int64_t, kIsVolatile>(field_offset);
+ return GetFieldPrimitive<int64_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -683,30 +700,6 @@
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- template<typename kSize, bool kIsVolatile>
- ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- kSize* addr = reinterpret_cast<kSize*>(raw_addr);
- if (kIsVolatile) {
- reinterpret_cast<Atomic<kSize>*>(addr)->store(new_value, std::memory_order_seq_cst);
- } else {
- reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
- }
- }
-
- template<typename kSize, bool kIsVolatile>
- ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
- const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
- if (kIsVolatile) {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->load(std::memory_order_seq_cst);
- } else {
- return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
- }
- }
-
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
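
The renamed GetFieldPrimitive()/SetFieldPrimitive() above keep the previous behaviour: volatile Java fields use sequentially consistent atomic accesses, plain fields use the relaxed "Java data" load/store. A rough standalone equivalent, with memcpy approximating the non-volatile path and names that are illustrative only (alignment of the field is assumed, as it is in the runtime):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

template <typename T, bool kIsVolatile>
T GetPrimitiveAt(const void* obj, size_t offset) {
  const uint8_t* addr = static_cast<const uint8_t*>(obj) + offset;
  if (kIsVolatile) {
    return reinterpret_cast<const std::atomic<T>*>(addr)->load(std::memory_order_seq_cst);
  }
  T value;
  std::memcpy(&value, addr, sizeof(T));  // plain (non-volatile) Java field read
  return value;
}

template <typename T, bool kIsVolatile>
void SetPrimitiveAt(void* obj, size_t offset, T value) {
  uint8_t* addr = static_cast<uint8_t*>(obj) + offset;
  if (kIsVolatile) {
    reinterpret_cast<std::atomic<T>*>(addr)->store(value, std::memory_order_seq_cst);
  } else {
    std::memcpy(addr, &value, sizeof(T));  // plain (non-volatile) Java field write
  }
}
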
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 1d2f47f..7d101bf 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -67,7 +67,7 @@
template<class T> template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline T* ObjectArray<T>::Get(int32_t i) {
- if (!CheckIsValidIndex(i)) {
+ if (!CheckIsValidIndex<kVerifyFlags>(i)) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/mirror/object_reference-inl.h b/runtime/mirror/object_reference-inl.h
index 295b460..780d662 100644
--- a/runtime/mirror/object_reference-inl.h
+++ b/runtime/mirror/object_reference-inl.h
@@ -24,17 +24,26 @@
namespace art {
namespace mirror {
-template<bool kPoisonReferences, class MirrorType>
+template <bool kPoisonReferences, class MirrorType>
+ALWAYS_INLINE
void ObjectReference<kPoisonReferences, MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
Assign(ptr.Ptr());
}
-template<class MirrorType>
+template <class MirrorType>
+ALWAYS_INLINE
bool HeapReference<MirrorType>::CasWeakRelaxed(MirrorType* expected_ptr, MirrorType* new_ptr) {
return reference_.CompareAndSetWeakRelaxed(Compression::Compress(expected_ptr),
Compression::Compress(new_ptr));
}
+template <typename MirrorType>
+template <bool kIsVolatile>
+ALWAYS_INLINE
+void HeapReference<MirrorType>::Assign(ObjPtr<MirrorType> ptr) {
+ Assign<kIsVolatile>(ptr.Ptr());
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 77154e2..d6a39aa 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -60,6 +60,15 @@
using Compression = PtrCompression<kPoisonReferences, MirrorType>;
public:
+ /*
+ * Returns a pointer to the mirror of the managed object this reference is for.
+ *
+ * This does NOT return the current object (which is not derived from mirror::Object,
+ * and therefore cannot be one) as a mirror pointer. Instead, this
+ * returns a pointer to the mirror of the managed object this refers to.
+ *
+ * TODO (chriswailes): Rename to GetPtr().
+ */
MirrorType* AsMirrorPtr() const {
return Compression::Decompress(reference_);
}
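
The new AsMirrorPtr() comment is about compressed references: the field stores a 32-bit value and hands back a full pointer to the managed object on read. A toy version of that scheme, assuming the heap sits below 4 GiB and using hypothetical class names rather than ART's PtrCompression/HeapReference:

#include <cassert>
#include <cstdint>

template <typename MirrorType>
class CompressedRef {
 public:
  static CompressedRef FromPtr(MirrorType* ptr) {
    uintptr_t raw = reinterpret_cast<uintptr_t>(ptr);
    assert(raw <= UINT32_MAX && "object must live in the low 4 GiB");
    return CompressedRef(static_cast<uint32_t>(raw));
  }

  // Counterpart of AsMirrorPtr(): returns the managed object this reference
  // points to, not an object describing the reference slot itself.
  MirrorType* AsMirrorPtr() const {
    return reinterpret_cast<MirrorType*>(static_cast<uintptr_t>(reference_));
  }

 private:
  explicit CompressedRef(uint32_t ref) : reference_(ref) {}
  uint32_t reference_;
};
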
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 0b615a6..cf6543f 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -204,7 +204,7 @@
template<typename ArrayT>
void TestPrimitiveArray(ClassLinker* cl) {
ScopedObjectAccess soa(Thread::Current());
- typedef typename ArrayT::ElementType T;
+ using T = typename ArrayT::ElementType;
StackHandleScope<2> hs(soa.Self());
Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
@@ -252,9 +252,9 @@
}
TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
- typedef DoubleArray ArrayT;
+ using ArrayT = DoubleArray;
ScopedObjectAccess soa(Thread::Current());
- typedef typename ArrayT::ElementType T;
+ using T = typename ArrayT::ElementType;
StackHandleScope<2> hs(soa.Self());
Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
@@ -283,9 +283,9 @@
}
TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) {
- typedef FloatArray ArrayT;
+ using ArrayT = FloatArray;
ScopedObjectAccess soa(Thread::Current());
- typedef typename ArrayT::ElementType T;
+ using T = typename ArrayT::ElementType;
StackHandleScope<2> hs(soa.Self());
Handle<ArrayT> a = hs.NewHandle(ArrayT::Alloc(soa.Self(), 2));
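
The object_test.cc hunks are purely mechanical typedef-to-alias conversions. The two forms are equivalent, but the alias declaration reads left to right and, unlike typedef, can itself be templated:

#include <map>
#include <string>

// Equivalent declarations, old and new style.
typedef std::map<std::string, int> CountTableOld;
using CountTableNew = std::map<std::string, int>;

// Only the alias form can be parameterized, which is why modern code prefers it.
template <typename T>
using NamedTable = std::map<std::string, T>;

NamedTable<double> weights;  // std::map<std::string, double>
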
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index d08717c..b32db08 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -50,11 +50,11 @@
return sizeof(String);
}
- static MemberOffset CountOffset() {
+ static constexpr MemberOffset CountOffset() {
return OFFSET_OF_OBJECT_MEMBER(String, count_);
}
- static MemberOffset ValueOffset() {
+ static constexpr MemberOffset ValueOffset() {
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 864e1ea..4391910 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -18,6 +18,7 @@
#include "array-inl.h"
#include "art_field-inl.h"
+#include "base/casts.h"
#include "class-inl.h"
#include "class_linker.h"
#include "class_root.h"
@@ -691,7 +692,7 @@
template <typename T>
class FieldAccessViaAccessor {
public:
- typedef Object::Accessor<T> Accessor;
+ using Accessor = Object::Accessor<T>;
// Apply an Accessor to get a field in an object.
static void Get(ObjPtr<Object> obj,
@@ -1033,7 +1034,7 @@
CASMode::kStrong,
std::memory_order_seq_cst);
}
- StoreResult(cas_result, result);
+ StoreResult(static_cast<uint8_t>(cas_result), result);
break;
}
case VarHandle::AccessMode::kWeakCompareAndSet:
@@ -1058,7 +1059,7 @@
CASMode::kWeak,
std::memory_order_seq_cst);
}
- StoreResult(cas_result, result);
+ StoreResult(static_cast<uint8_t>(cas_result), result);
break;
}
case VarHandle::AccessMode::kCompareAndExchange:
@@ -1680,8 +1681,7 @@
}
ArtField* FieldVarHandle::GetField() {
- uintptr_t opaque_field = static_cast<uintptr_t>(GetField64(ArtFieldOffset()));
- return reinterpret_cast<ArtField*>(opaque_field);
+ return reinterpret_cast64<ArtField*>(GetField64(ArtFieldOffset()));
}
bool FieldVarHandle::Access(AccessMode access_mode,
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 72eced2..02aa1a8 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1401,7 +1401,10 @@
// Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
// the locks held in this stack frame.
std::vector<verifier::MethodVerifier::DexLockInfo> monitor_enter_dex_pcs;
- verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
+ verifier::MethodVerifier::FindLocksAtDexPc(m,
+ dex_pc,
+ &monitor_enter_dex_pcs,
+ Runtime::Current()->GetTargetSdkVersion());
for (verifier::MethodVerifier::DexLockInfo& dex_lock_info : monitor_enter_dex_pcs) {
// As a debug check, check that dex PC corresponds to a monitor-enter.
if (kIsDebugBuild) {
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index c88748f..0b168f8 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -62,7 +62,7 @@
monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
expected_(expected) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
{
ScopedObjectAccess soa(self);
@@ -118,7 +118,7 @@
}
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -136,7 +136,7 @@
monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis),
expected_(expected) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
monitor_test_->barrier_->Wait(self); // Wait for the other thread to set up the monitor.
{
@@ -158,7 +158,7 @@
monitor_test_->complete_barrier_->Wait(self); // Wait for test completion.
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -174,7 +174,7 @@
InterruptTask(MonitorTest* monitor_test, uint64_t initial_sleep, uint64_t millis) :
monitor_test_(monitor_test), initial_sleep_(initial_sleep), millis_(millis) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
monitor_test_->barrier_->Wait(self); // Wait for the other thread to set up the monitor.
{
@@ -202,7 +202,7 @@
monitor_test_->complete_barrier_->Wait(self); // Wait for test completion.
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -216,7 +216,7 @@
public:
explicit WatchdogTask(MonitorTest* monitor_test) : monitor_test_(monitor_test) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
monitor_test_->watchdog_object_.Get()->MonitorEnter(self); // Lock the object.
@@ -231,7 +231,7 @@
}
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -326,14 +326,14 @@
public:
explicit TryLockTask(Handle<mirror::Object> obj) : obj_(obj) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
// Lock is held by other thread, try lock should fail.
ObjectTryLock<mirror::Object> lock(self, obj_);
EXPECT_FALSE(lock.Acquired());
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
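The monitor_test.cc edits above only add the override keyword to the virtual Run/Finalize methods; behavior is unchanged, but a mismatched signature now fails to compile instead of silently declaring a new virtual. A minimal sketch, independent of ART's Task types:

struct Task {
  virtual ~Task() {}
  virtual void Run(int thread_id) {}
  virtual void Finalize() {}
};

struct MyTask : Task {
  // 'override' makes the compiler verify this really overrides a base virtual.
  void Run(int thread_id) override {}
  // void Run(long thread_id) override {}  // would not compile: no such base method
  void Finalize() override {}
};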
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 71fabd0..6becd36 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -20,6 +20,7 @@
#include "android-base/stringprintf.h"
+#include "base/casts.h"
#include "base/file_utils.h"
#include "base/logging.h"
#include "base/os.h"
@@ -74,10 +75,10 @@
return false;
}
- oat_file = reinterpret_cast<const OatFile*>(static_cast<uintptr_t>(long_data[kOatFileIndex]));
+ oat_file = reinterpret_cast64<const OatFile*>(long_data[kOatFileIndex]);
dex_files.reserve(array_size - 1);
for (jsize i = kDexFileIndexStart; i < array_size; ++i) {
- dex_files.push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(long_data[i])));
+ dex_files.push_back(reinterpret_cast64<const DexFile*>(long_data[i]));
}
env->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array), long_data, JNI_ABORT);
@@ -99,9 +100,9 @@
return nullptr;
}
- long_data[kOatFileIndex] = reinterpret_cast<uintptr_t>(oat_file);
+ long_data[kOatFileIndex] = reinterpret_cast64<jlong>(oat_file);
for (size_t i = 0; i < vec.size(); ++i) {
- long_data[kDexFileIndexStart + i] = reinterpret_cast<uintptr_t>(vec[i].get());
+ long_data[kDexFileIndexStart + i] = reinterpret_cast64<jlong>(vec[i].get());
}
env->ReleaseLongArrayElements(long_array, long_data, 0);
@@ -111,7 +112,7 @@
// Now release all the unique_ptrs.
for (auto& dex_file : vec) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
return long_array;
@@ -294,7 +295,7 @@
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
if (linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
- dex_file.release();
+ dex_file.release(); // NOLINT
}
}
}
@@ -323,6 +324,9 @@
}
Runtime* const runtime = Runtime::Current();
bool all_deleted = true;
+ // We need to clear the caches since they may contain pointers to the dex instructions.
+ // A different dex file can be loaded at the same memory location later by chance.
+ Thread::ClearAllInterpreterCaches();
{
ScopedObjectAccess soa(env);
ObjPtr<mirror::Object> dex_files_object = soa.Decode<mirror::Object>(cookie);
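The new comment above captures a use-after-unmap hazard: interpreter caches key off dex instruction addresses, and a later dex file can be mapped at the same address. A toy model of the invalidate-on-close pattern (InterpreterCache and OnDexFileClosed are illustrative names, not ART's real types):

#include <cstdint>
#include <unordered_map>
#include <vector>

// Illustrative only: a cache keyed by instruction address is valid only while
// the mapping behind those addresses stays alive.
struct InterpreterCache {
  std::unordered_map<const uint16_t*, uint32_t> entries;  // dex pc -> cached data
  void Clear() { entries.clear(); }
};

// When a dex file is closed, every thread's cache must be cleared; otherwise a
// new dex file mapped at the same address would hit stale entries.
void OnDexFileClosed(std::vector<InterpreterCache*>& per_thread_caches) {
  for (InterpreterCache* cache : per_thread_caches) {
    cache->Clear();
  }
}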
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 0e61940..861d1db 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -325,7 +325,7 @@
Runtime::Current()->GetHeap()->GetTaskProcessor()->RunAllTasks(ThreadForEnv(env));
}
-typedef std::map<std::string, ObjPtr<mirror::String>> StringTable;
+using StringTable = std::map<std::string, ObjPtr<mirror::String>>;
class PreloadDexCachesStringsVisitor : public SingleRootVisitor {
public:
@@ -682,6 +682,11 @@
Runtime::Current()->SetProcessPackageName(package_name.c_str());
}
+static jboolean VMRuntime_hasBootImageSpaces(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+ return Runtime::Current()->GetHeap()->HasBootImageSpace() ? JNI_TRUE : JNI_FALSE;
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -690,6 +695,7 @@
NATIVE_METHOD(VMRuntime, clearGrowthLimit, "()V"),
NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
+ FAST_NATIVE_METHOD(VMRuntime, hasBootImageSpaces, "()Z"), // Could be CRITICAL.
NATIVE_METHOD(VMRuntime, hasUsedHiddenApi, "()Z"),
NATIVE_METHOD(VMRuntime, setHiddenApiExemptions, "([Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, setHiddenApiAccessLogSamplingRate, "(I)V"),
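For context on the gMethods table above: the NATIVE_METHOD/FAST_NATIVE_METHOD macros boil down to JNINativeMethod entries handed to RegisterNatives. A plain-JNI sketch of registering a boolean query like hasBootImageSpaces (the names here are stand-ins, not the runtime's real wiring):

#include <jni.h>

static jboolean MyRuntime_hasBootImageSpaces(JNIEnv*, jclass) {
  return JNI_TRUE;  // stand-in for querying the heap for boot image spaces
}

static const JNINativeMethod kMethods[] = {
    {"hasBootImageSpaces", "()Z",
     reinterpret_cast<void*>(MyRuntime_hasBootImageSpaces)},
};

jint RegisterMyRuntimeNatives(JNIEnv* env, jclass klass) {
  return env->RegisterNatives(
      klass, kMethods, static_cast<jint>(sizeof(kMethods) / sizeof(kMethods[0])));
}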
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 3919227..e3932df 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -113,7 +113,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_loader(nullptr) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
ObjPtr<mirror::Class> c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5a5fb16..f5039d1 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -62,7 +62,7 @@
caller(nullptr) {
}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod *m = GetMethod();
if (m == nullptr) {
// Attached native thread. Assume this is *not* boot class path.
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 496a6f3..439f485 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -31,11 +31,6 @@
void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
- // This is only used by compilers which need to be able to run without relocation even when it
- // would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
- // to disable the relocation since both deal with writing out the images directly.
- bool IsRelocationPossible() override { return false; }
-
verifier::VerifierDeps* GetVerifierDeps() const override { return nullptr; }
private:
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 39dc8da..519eed7 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -81,9 +81,7 @@
quick_imt_conflict_trampoline_offset_(0),
quick_resolution_trampoline_offset_(0),
quick_to_interpreter_bridge_offset_(0),
- image_patch_delta_(0),
- image_file_location_oat_checksum_(0),
- image_file_location_oat_data_begin_(0) {
+ image_file_location_oat_checksum_(0) {
// Don't want asserts in header as they would be checked in each file that includes it. But the
// fields are private, so we check inside a method.
static_assert(sizeof(magic_) == sizeof(kOatMagic),
@@ -110,9 +108,6 @@
if (!IsAligned<kPageSize>(executable_offset_)) {
return false;
}
- if (!IsAligned<kPageSize>(image_patch_delta_)) {
- return false;
- }
if (!IsValidInstructionSet(instruction_set_)) {
return false;
}
@@ -135,9 +130,6 @@
if (!IsAligned<kPageSize>(executable_offset_)) {
return "Executable offset not page-aligned.";
}
- if (!IsAligned<kPageSize>(image_patch_delta_)) {
- return "Image patch delta not page-aligned.";
- }
if (!IsValidInstructionSet(instruction_set_)) {
return StringPrintf("Invalid instruction set, %d.", static_cast<int>(instruction_set_));
}
@@ -159,7 +151,6 @@
UpdateChecksum(&instruction_set_features_bitmap_, sizeof(instruction_set_features_bitmap_));
UpdateChecksum(&dex_file_count_, sizeof(dex_file_count_));
UpdateChecksum(&image_file_location_oat_checksum_, sizeof(image_file_location_oat_checksum_));
- UpdateChecksum(&image_file_location_oat_data_begin_, sizeof(image_file_location_oat_data_begin_));
// Update checksum for variable data size.
UpdateChecksum(&key_value_store_size_, sizeof(key_value_store_size_));
@@ -362,26 +353,6 @@
quick_to_interpreter_bridge_offset_ = offset;
}
-int32_t OatHeader::GetImagePatchDelta() const {
- CHECK(IsValid());
- return image_patch_delta_;
-}
-
-void OatHeader::RelocateOat(off_t delta) {
- CHECK(IsValid());
- CHECK_ALIGNED(delta, kPageSize);
- image_patch_delta_ += delta;
- if (image_file_location_oat_data_begin_ != 0) {
- image_file_location_oat_data_begin_ += delta;
- }
-}
-
-void OatHeader::SetImagePatchDelta(int32_t off) {
- CHECK(IsValid());
- CHECK_ALIGNED(off, kPageSize);
- image_patch_delta_ = off;
-}
-
uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
CHECK(IsValid());
return image_file_location_oat_checksum_;
@@ -392,17 +363,6 @@
image_file_location_oat_checksum_ = image_file_location_oat_checksum;
}
-uint32_t OatHeader::GetImageFileLocationOatDataBegin() const {
- CHECK(IsValid());
- return image_file_location_oat_data_begin_;
-}
-
-void OatHeader::SetImageFileLocationOatDataBegin(uint32_t image_file_location_oat_data_begin) {
- CHECK(IsValid());
- CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize);
- image_file_location_oat_data_begin_ = image_file_location_oat_data_begin;
-}
-
uint32_t OatHeader::GetKeyValueStoreSize() const {
CHECK(IsValid());
return key_value_store_size_;
@@ -481,10 +441,6 @@
return sizeof(OatHeader) + key_value_store_size_;
}
-bool OatHeader::IsPic() const {
- return IsKeyEnabled(OatHeader::kPicKey);
-}
-
bool OatHeader::IsDebuggable() const {
return IsKeyEnabled(OatHeader::kDebuggableKey);
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 037c8f9..5c5a02d 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,13 +32,12 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Add stack map fast path for GC.
- static constexpr uint8_t kOatVersion[] = { '1', '6', '1', '\0' };
+ // Last oat version changed reason: Remove interpreter alt tables.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '3', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDex2OatHostKey = "dex2oat-host";
- static constexpr const char* kPicKey = "pic";
static constexpr const char* kDebuggableKey = "debuggable";
static constexpr const char* kNativeDebuggableKey = "native-debuggable";
static constexpr const char* kCompilerFilter = "compiler-filter";
@@ -95,17 +94,11 @@
uint32_t GetQuickToInterpreterBridgeOffset() const;
void SetQuickToInterpreterBridgeOffset(uint32_t offset);
- int32_t GetImagePatchDelta() const;
- void RelocateOat(off_t delta);
- void SetImagePatchDelta(int32_t off);
-
InstructionSet GetInstructionSet() const;
uint32_t GetInstructionSetFeaturesBitmap() const;
uint32_t GetImageFileLocationOatChecksum() const;
void SetImageFileLocationOatChecksum(uint32_t image_file_location_oat_checksum);
- uint32_t GetImageFileLocationOatDataBegin() const;
- void SetImageFileLocationOatDataBegin(uint32_t image_file_location_oat_data_begin);
uint32_t GetKeyValueStoreSize() const;
const uint8_t* GetKeyValueStore() const;
@@ -113,7 +106,6 @@
bool GetStoreKeyValuePairByIndex(size_t index, const char** key, const char** value) const;
size_t GetHeaderSize() const;
- bool IsPic() const;
bool IsDebuggable() const;
bool IsNativeDebuggable() const;
CompilerFilter::Filter GetCompilerFilter() const;
@@ -149,11 +141,7 @@
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
- // The amount that the image this oat is associated with has been patched.
- int32_t image_patch_delta_;
-
uint32_t image_file_location_oat_checksum_;
- uint32_t image_file_location_oat_data_begin_;
uint32_t key_value_store_size_;
uint8_t key_value_store_[0]; // note variable width data at end
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d5246b4..f16c46b 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -101,7 +101,6 @@
const std::string& vdex_filename,
const std::string& elf_filename,
const std::string& location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -115,7 +114,6 @@
int oat_fd,
const std::string& vdex_filename,
const std::string& oat_filename,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -156,9 +154,7 @@
/*inout*/MemMap* reservation, // Where to load if not null.
/*out*/std::string* error_msg) = 0;
- bool ComputeFields(uint8_t* requested_base,
- const std::string& file_path,
- std::string* error_msg);
+ bool ComputeFields(const std::string& file_path, std::string* error_msg);
virtual void PreSetup(const std::string& elf_filename) = 0;
@@ -187,7 +183,6 @@
const std::string& vdex_filename,
const std::string& elf_filename,
const std::string& location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -207,7 +202,7 @@
return nullptr;
}
- if (!ret->ComputeFields(requested_base, elf_filename, error_msg)) {
+ if (!ret->ComputeFields(elf_filename, error_msg)) {
return nullptr;
}
@@ -230,7 +225,6 @@
int oat_fd,
const std::string& vdex_location,
const std::string& oat_location,
- uint8_t* requested_base,
bool writable,
bool executable,
bool low_4gb,
@@ -248,7 +242,7 @@
return nullptr;
}
- if (!ret->ComputeFields(requested_base, oat_location, error_msg)) {
+ if (!ret->ComputeFields(oat_location, error_msg)) {
return nullptr;
}
@@ -271,7 +265,7 @@
std::string* error_msg) {
vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
vdex_end_ - vdex_begin_,
- vdex_begin_ != nullptr /* mmap_reuse */,
+ /*mmap_reuse=*/ vdex_begin_ != nullptr,
vdex_filename,
writable,
low_4gb,
@@ -299,13 +293,13 @@
} else {
vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
vdex_end_ - vdex_begin_,
- vdex_begin_ != nullptr /* mmap_reuse */,
+ /*mmap_reuse=*/ vdex_begin_ != nullptr,
vdex_fd,
s.st_size,
vdex_filename,
writable,
low_4gb,
- false /* unquicken */,
+ /*unquicken=*/ false,
error_msg);
if (vdex_.get() == nullptr) {
*error_msg = "Failed opening vdex file.";
@@ -316,9 +310,7 @@
return true;
}
-bool OatFileBase::ComputeFields(uint8_t* requested_base,
- const std::string& file_path,
- std::string* error_msg) {
+bool OatFileBase::ComputeFields(const std::string& file_path, std::string* error_msg) {
std::string symbol_error_msg;
begin_ = FindDynamicSymbolAddress("oatdata", &symbol_error_msg);
if (begin_ == nullptr) {
@@ -327,16 +319,6 @@
symbol_error_msg.c_str());
return false;
}
- if (requested_base != nullptr && begin_ != requested_base) {
- // Host can fail this check. Do not dump there to avoid polluting the output.
- if (kIsTargetBuild && (kIsDebugBuild || VLOG_IS_ON(oat))) {
- PrintFileToLog("/proc/self/maps", android::base::LogSeverity::WARNING);
- }
- *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: "
- "oatdata=%p != expected=%p. See process maps in the log.",
- begin_, requested_base);
- return false;
- }
end_ = FindDynamicSymbolAddress("oatlastword", &symbol_error_msg);
if (end_ == nullptr) {
*error_msg = StringPrintf("Failed to find oatlastword symbol in '%s' %s",
@@ -410,7 +392,7 @@
return false;
}
static_assert(std::is_trivial<T>::value, "T must be a trivial type");
- typedef __attribute__((__aligned__(1))) T unaligned_type;
+ using unaligned_type __attribute__((__aligned__(1))) = T;
*value = *reinterpret_cast<const unaligned_type*>(*oat);
*oat += sizeof(T);
return true;
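The typedef-to-using change above keeps the same trick: an alignment-1 alias of T so the dereference becomes a legal unaligned load. As a point of comparison, the portable way to express the same read uses memcpy; a sketch assuming only the standard library:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Read a trivial T from an unaligned byte cursor and advance the cursor.
template <typename T>
bool ReadUnaligned(const uint8_t** cursor, const uint8_t* end, T* value) {
  static_assert(std::is_trivial<T>::value, "T must be a trivial type");
  if (end - *cursor < static_cast<std::ptrdiff_t>(sizeof(T))) {
    return false;  // not enough bytes left
  }
  std::memcpy(value, *cursor, sizeof(T));  // compilers lower this to a single load
  *cursor += sizeof(T);
  return true;
}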
@@ -649,15 +631,15 @@
if (zip_fd != -1) {
loaded = dex_file_loader.OpenZip(zip_fd,
dex_file_location,
- /* verify */ false,
- /* verify_checksum */ false,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg,
uncompressed_dex_files_.get());
} else {
loaded = dex_file_loader.Open(dex_file_location.c_str(),
dex_file_location,
- /* verify */ false,
- /* verify_checksum */ false,
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
error_msg,
uncompressed_dex_files_.get());
}
@@ -1045,7 +1027,7 @@
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is similar API on OSX,
// but let's fallback to the custom loading code for the time being.
- UNUSED(elf_filename, oat_file_begin);
+ UNUSED(elf_filename, reservation);
*error_msg = "Dlopen unsupported on Mac.";
return false;
#else
@@ -1323,7 +1305,7 @@
}
// Complete the setup.
- if (!oat_file->ComputeFields(/* requested_base */ nullptr, file->GetPath(), error_msg)) {
+ if (!oat_file->ComputeFields(file->GetPath(), error_msg)) {
return nullptr;
}
@@ -1383,8 +1365,8 @@
/*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
if (oat_fd != -1) {
- std::unique_ptr<File> file = std::make_unique<File>(oat_fd, false);
- file->DisableAutoClose();
+ int duped_fd = DupCloexec(oat_fd);
+ std::unique_ptr<File> file = std::make_unique<File>(duped_fd, false);
if (file == nullptr) {
*error_msg = StringPrintf("Failed to open oat filename for reading: %s",
strerror(errno));
@@ -1407,10 +1389,9 @@
/*inout*/MemMap* reservation,
/*out*/std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- // TODO: rename requested_base to oat_data_begin
elf_file_.reset(ElfFile::Open(file,
writable,
- /*program_header_only*/true,
+ /*program_header_only=*/ true,
low_4gb,
error_msg));
if (elf_file_ == nullptr) {
@@ -1458,7 +1439,7 @@
const std::string& location,
const char* abs_dex_location,
std::string* error_msg) {
- std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, false /* executable */));
+ std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, /*executable=*/ false));
return oat_file->InitializeFromElfFile(zip_fd, elf_file, vdex_file, abs_dex_location, error_msg)
? oat_file.release()
: nullptr;
@@ -1467,7 +1448,6 @@
OatFile* OatFile::Open(int zip_fd,
const std::string& oat_filename,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -1494,8 +1474,7 @@
vdex_filename,
oat_filename,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1524,8 +1503,7 @@
vdex_filename,
oat_filename,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1538,7 +1516,6 @@
int vdex_fd,
int oat_fd,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -1553,8 +1530,7 @@
oat_fd,
vdex_location,
oat_location,
- requested_base,
- false /* writable */,
+ /*writable=*/ false,
executable,
low_4gb,
abs_dex_location,
@@ -1572,11 +1548,11 @@
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- /* writable */ true,
- /* executable */ false,
- /*low_4gb*/false,
+ /*writable=*/ true,
+ /*executable=*/ false,
+ /*low_4gb=*/false,
abs_dex_location,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
error_msg);
}
@@ -1589,11 +1565,11 @@
return ElfOatFile::OpenElfFile(zip_fd,
file,
location,
- /* writable */ false,
- /* executable */ false,
- /*low_4gb*/false,
+ /*writable=*/ false,
+ /*executable=*/ false,
+ /*low_4gb=*/false,
abs_dex_location,
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
error_msg);
}
@@ -1995,11 +1971,6 @@
method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
}
-bool OatFile::IsPic() const {
- return GetOatHeader().IsPic();
- // TODO: Check against oat_patches. b/18144996
-}
-
bool OatFile::IsDebuggable() const {
return GetOatHeader().IsDebuggable();
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index f20c603..ba08e5e 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -85,7 +85,6 @@
static OatFile* Open(int zip_fd,
const std::string& filename,
const std::string& location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -99,7 +98,6 @@
int vdex_fd,
int oat_fd,
const std::string& oat_location,
- uint8_t* requested_base,
bool executable,
bool low_4gb,
const char* abs_dex_location,
@@ -129,8 +127,6 @@
return is_executable_;
}
- bool IsPic() const;
-
// Indicates whether the oat file was compiled with full debugging capability.
bool IsDebuggable() const;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 4ed7e35..754aa40 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -58,9 +58,6 @@
case OatFileAssistant::kOatBootImageOutOfDate:
stream << "kOatBootImageOutOfDate";
break;
- case OatFileAssistant::kOatRelocationOutOfDate:
- stream << "kOatRelocationOutOfDate";
- break;
case OatFileAssistant::kOatUpToDate:
stream << "kOatUpToDate";
break;
@@ -79,9 +76,9 @@
isa,
load_executable,
only_load_system_executable,
- -1 /* vdex_fd */,
- -1 /* oat_fd */,
- -1 /* zip_fd */) {}
+ /*vdex_fd=*/ -1,
+ /*oat_fd=*/ -1,
+ /*zip_fd=*/ -1) {}
OatFileAssistant::OatFileAssistant(const char* dex_location,
@@ -127,7 +124,7 @@
// Get the oat filename.
std::string oat_file_name;
if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
- oat_.Reset(oat_file_name, false /* use_fd */);
+ oat_.Reset(oat_file_name, /*use_fd=*/ false);
} else {
LOG(WARNING) << "Failed to determine oat file name for dex location "
<< dex_location_ << ": " << error_msg;
@@ -442,43 +439,6 @@
return kOatDexOutOfDate;
}
- if (CompilerFilter::IsAotCompilationEnabled(current_compiler_filter)) {
- if (!file.IsPic()) {
- const ImageInfo* image_info = GetImageInfo();
- if (image_info == nullptr) {
- VLOG(oat) << "No image to check oat relocation against.";
- return kOatRelocationOutOfDate;
- }
-
- // Verify the oat_data_begin recorded for the image in the oat file matches
- // the actual oat_data_begin for boot.oat in the image.
- const OatHeader& oat_header = file.GetOatHeader();
- uintptr_t oat_data_begin = oat_header.GetImageFileLocationOatDataBegin();
- if (oat_data_begin != image_info->oat_data_begin) {
- VLOG(oat) << file.GetLocation() <<
- ": Oat file image oat_data_begin (" << oat_data_begin << ")"
- << " does not match actual image oat_data_begin ("
- << image_info->oat_data_begin << ")";
- return kOatRelocationOutOfDate;
- }
-
- // Verify the oat_patch_delta recorded for the image in the oat file matches
- // the actual oat_patch_delta for the image.
- int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
- if (oat_patch_delta != image_info->patch_delta) {
- VLOG(oat) << file.GetLocation() <<
- ": Oat file image patch delta (" << oat_patch_delta << ")"
- << " does not match actual image patch delta ("
- << image_info->patch_delta << ")";
- return kOatRelocationOutOfDate;
- }
- } else {
- // Oat files compiled in PIC mode do not require relocation.
- VLOG(oat) << "Oat relocation test skipped for PIC oat file";
- }
- } else {
- VLOG(oat) << "Oat relocation test skipped for compiler filter " << current_compiler_filter;
- }
return kOatUpToDate;
}
@@ -615,7 +575,6 @@
}
info->oat_checksum = image_header->GetOatChecksum();
- info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
info->patch_delta = image_header->GetPatchDelta();
return info;
}
@@ -709,7 +668,6 @@
case kOatDexOutOfDate:
case kOatBootImageOutOfDate: return false;
- case kOatRelocationOutOfDate:
case kOatUpToDate: return true;
}
UNREACHABLE();
@@ -734,9 +692,9 @@
vdex = VdexFile::Open(vdex_fd_,
s.st_size,
vdex_filename,
- false /*writable*/,
- false /*low_4gb*/,
- false /* unquicken */,
+ /*writable=*/ false,
+ /*low_4gb=*/ false,
+ /*unquicken=*/ false,
&error_msg);
}
}
@@ -776,7 +734,6 @@
bool downgrade,
ClassLoaderContext* context) {
- bool compilation_desired = CompilerFilter::IsAotCompilationEnabled(target);
bool filter_okay = CompilerFilterIsOkay(target, profile_changed, downgrade);
bool class_loader_context_okay = ClassLoaderContextIsOkay(context);
@@ -788,16 +745,6 @@
return kNoDexOptNeeded;
}
- if (filter_okay && !compilation_desired && Status() == kOatRelocationOutOfDate) {
- // If no compilation is desired, then it doesn't matter if the oat
- // file needs relocation. It's in good shape as is.
- return kNoDexOptNeeded;
- }
-
- if (filter_okay && Status() == kOatRelocationOutOfDate) {
- return kDex2OatForRelocation;
- }
-
if (IsUseable()) {
return kDex2OatForFilter;
}
@@ -831,22 +778,20 @@
vdex_fd_,
oat_fd_,
filename_.c_str(),
- /* requested_base */ nullptr,
executable,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
oat_file_assistant_->dex_location_.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
}
} else {
- file_.reset(OatFile::Open(/* zip_fd */ -1,
+ file_.reset(OatFile::Open(/*zip_fd=*/ -1,
filename_.c_str(),
filename_.c_str(),
- /* requested_base */ nullptr,
executable,
- /* low_4gb */ false,
+ /*low_4gb=*/ false,
oat_file_assistant_->dex_location_.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
}
if (file_.get() == nullptr) {
@@ -945,10 +890,6 @@
VLOG(oat) << "Oat File Assistant: No relocated oat file found,"
<< " attempting to fall back to interpreting oat file instead.";
- if (Status() == kOatRelocationOutOfDate && !IsExecutable()) {
- return ReleaseFile();
- }
-
switch (Status()) {
case kOatBootImageOutOfDate:
// OutOfDate may be either a mismatched image, or a missing image.
@@ -961,19 +902,6 @@
// go forward.
FALLTHROUGH_INTENDED;
- case kOatRelocationOutOfDate:
- // We are loading an oat file for runtime use that needs relocation.
- // Reload the file non-executable to ensure that we interpret out of the
- // dex code in the oat file rather than trying to execute the unrelocated
- // compiled code.
- oat_file_assistant_->load_executable_ = false;
- Reset();
- if (IsUseable()) {
- CHECK(!IsExecutable());
- return ReleaseFile();
- }
- break;
-
case kOatUpToDate:
case kOatCannotOpen:
case kOatDexOutOfDate:
@@ -993,7 +921,7 @@
std::string* out_compilation_reason) {
// It may not be possible to load an oat file executable (e.g., selinux restrictions). Load
// non-executable and check the status manually.
- OatFileAssistant oat_file_assistant(filename.c_str(), isa, false /* load_executable */);
+ OatFileAssistant oat_file_assistant(filename.c_str(), isa, /*load_executable=*/ false);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
if (oat_file == nullptr) {
@@ -1026,11 +954,6 @@
*out_compilation_filter = "run-from-vdex-fallback";
}
return;
-
- case kOatRelocationOutOfDate:
- // On relocation-out-of-date, we'd run the dex code.
- *out_compilation_filter = "run-from-vdex-fallback";
- return;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index dbfbdf9..590ae22 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -66,11 +66,6 @@
// is out of date with respect to the target compiler filter.
// Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_FILTER
kDex2OatForFilter = 3,
-
- // dex2oat should be run to update the apk/jar because the existing code
- // is not relocated to match the boot image.
- // Matches Java: dalvik.system.DexFile.DEX2OAT_FOR_RELOCATION
- kDex2OatForRelocation = 4,
};
enum OatStatus {
@@ -85,13 +80,6 @@
// dex file, but is out of date with respect to the boot image.
kOatBootImageOutOfDate,
- // kOatRelocationOutOfDate - The oat file is up to date with respect to
- // the dex file and boot image, but contains compiled code that has the
- // wrong patch delta with respect to the boot image. Patchoat should be
- // run on the oat file to update the patch delta of the compiled code to
- // match the boot image.
- kOatRelocationOutOfDate,
-
// kOatUpToDate - The oat file is completely up to date with respect to
// the dex file and boot image.
kOatUpToDate,
@@ -259,7 +247,6 @@
private:
struct ImageInfo {
uint32_t oat_checksum = 0;
- uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 5a29978..3a974df 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -228,13 +228,13 @@
EXPECT_EQ(nullptr, oat_file.get());
}
-// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
-// Expect: The status is kNoDexOptNeeded, because PIC needs no relocation.
+// Case: We have a DEX file and an ODEX file, but no OAT file.
+// Expect: The status is kNoDexOptNeeded.
TEST_F(OatFileAssistantTest, OdexUpToDate) {
std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
// Force the use of oat location by making the dex parent not writable.
OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
@@ -258,14 +258,14 @@
// Case: We have a DEX file and a PIC ODEX file, but no OAT file. We load the dex
// file via a symlink.
-// Expect: The status is kNoDexOptNeeded, because PIC needs no relocation.
+// Expect: The status is kNoDexOptNeeded.
TEST_F(OatFileAssistantTest, OdexUpToDateSymLink) {
std::string scratch_dir = GetScratchDir();
std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
// Now replace the dex location with a symlink.
std::string link = scratch_dir + "/link";
@@ -325,8 +325,7 @@
VerifyOptimizationStatus(dex_location, CompilerFilter::kSpeed, "unknown");
}
-// Case: Passing valid file descriptors of updated odex/vdex filesalong with
-// the dex file.
+// Case: Passing valid file descriptors of updated odex/vdex files along with the dex file.
// Expect: The status is kNoDexOptNeeded.
TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
@@ -337,9 +336,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- true,
- false,
- false);
+ /* with_alternate_image */ false);
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
@@ -378,9 +375,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- true,
- false,
- false);
+ /* with_alternate_image */ false);
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
@@ -413,9 +408,7 @@
GenerateOatForTest(dex_location.c_str(),
odex_location.c_str(),
CompilerFilter::kSpeed,
- true,
- false,
- false);
+ /* with_alternate_image */ false);
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY));
@@ -737,9 +730,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kSpeed,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/true);
+ /* with_alternate_image */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -774,9 +765,7 @@
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(),
CompilerFilter::kExtract,
- /*relocate*/true,
- /*pic*/false,
- /*with_alternate_image*/true);
+ /* with_alternate_image */ true);
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
@@ -807,11 +796,11 @@
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForRelocation,
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
- EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
@@ -827,7 +816,7 @@
// Create the dex and odex files
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
// Strip the dex file
Copy(GetStrippedDexSrc1(), dex_location);
@@ -863,7 +852,7 @@
// Create the odex file
Copy(GetDexSrc1(), dex_location);
- GeneratePicOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
// Strip the dex file.
Copy(GetStrippedDexSrc1(), dex_location);
@@ -923,9 +912,8 @@
EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
}
-// Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and
-// OAT files both have patch delta of 0.
-// Expect: It shouldn't crash.
+// Case: We have a DEX file, an ODEX file and an OAT file.
+// Expect: It shouldn't crash. We should load the odex file executable.
TEST_F(OatFileAssistantTest, OdexOatOverlap) {
std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex";
@@ -933,31 +921,23 @@
// Create the dex, the odex and the oat files.
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
- GenerateOatForTest(dex_location.c_str(),
- CompilerFilter::kSpeed,
- /*relocate*/false,
- /*pic*/false,
- /*with_alternate_image*/false);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
// Verify things don't go bad.
OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
- // -kDex2OatForRelocation is expected rather than kDex2OatForRelocation
- // based on the assumption that the odex location is more up-to-date than the oat
- // location, even if they both need relocation.
- EXPECT_EQ(-OatFileAssistant::kDex2OatForRelocation,
- oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
- EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OdexFileStatus());
- EXPECT_EQ(OatFileAssistant::kOatRelocationOutOfDate, oat_file_assistant.OatFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OatFileStatus());
EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
- // Things aren't relocated, so it should fall back to interpreted.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_FALSE(oat_file->IsExecutable());
+ EXPECT_TRUE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1159,29 +1139,29 @@
// A task to generate a dex location. Used by the RaceToGenerate test.
class RaceGenerateTask : public Task {
public:
- RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
- : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr)
+ RaceGenerateTask(const std::string& dex_location,
+ const std::string& oat_location,
+ Mutex* lock)
+ : dex_location_(dex_location),
+ oat_location_(oat_location),
+ lock_(lock),
+ loaded_oat_file_(nullptr)
{}
- void Run(Thread* self ATTRIBUTE_UNUSED) {
+ void Run(Thread* self ATTRIBUTE_UNUSED) override {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
const OatFile* oat_file = nullptr;
{
+ MutexLock mu(Thread::Current(), *lock_);
// Create the oat file.
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location_);
args.push_back("--oat-file=" + oat_location_);
std::string error_msg;
- if (kIsTargetBuild) {
- // Don't check whether dex2oat is successful: given we're running kNumThreads in
- // parallel, low memory killer might just kill some of the dex2oat invocations.
- DexoptTest::Dex2Oat(args, &error_msg);
- } else {
- ASSERT_TRUE(DexoptTest::Dex2Oat(args, &error_msg)) << error_msg;
- }
+ ASSERT_TRUE(DexoptTest::Dex2Oat(args, &error_msg)) << error_msg;
}
dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
@@ -1204,6 +1184,7 @@
private:
std::string dex_location_;
std::string oat_location_;
+ Mutex* lock_;
const OatFile* loaded_oat_file_;
};
@@ -1225,8 +1206,9 @@
Thread* self = Thread::Current();
ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
+ Mutex lock("RaceToGenerate");
for (size_t i = 0; i < kNumThreads; i++) {
- std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location));
+ std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location, &lock));
thread_pool.AddTask(self, task.get());
tasks.push_back(std::move(task));
}
@@ -1245,7 +1227,7 @@
}
// Case: We have a DEX file and an ODEX file, and no OAT file,
-// Expect: We should load the odex file non-executable.
+// Expect: We should load the odex file executable.
TEST_F(DexoptTest, LoadDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadDexOdexNoOat.odex";
@@ -1259,14 +1241,14 @@
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_FALSE(oat_file->IsExecutable());
+ EXPECT_TRUE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
}
// Case: We have a MultiDEX file and an ODEX file, and no OAT file.
-// Expect: We should load the odex file non-executable.
+// Expect: We should load the odex file executable.
TEST_F(DexoptTest, LoadMultiDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadMultiDexOdexNoOat.odex";
@@ -1280,7 +1262,7 @@
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_FALSE(oat_file->IsExecutable());
+ EXPECT_TRUE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(2u, dex_files.size());
@@ -1312,7 +1294,6 @@
{OatFileAssistant::kDex2OatFromScratch, "DEX2OAT_FROM_SCRATCH"},
{OatFileAssistant::kDex2OatForBootImage, "DEX2OAT_FOR_BOOT_IMAGE"},
{OatFileAssistant::kDex2OatForFilter, "DEX2OAT_FOR_FILTER"},
- {OatFileAssistant::kDex2OatForRelocation, "DEX2OAT_FOR_RELOCATION"},
};
ScopedObjectAccess soa(Thread::Current());
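The RaceGenerateTask change above hands every task the same Mutex so the dex2oat invocations run one at a time while the subsequent loads still race. A standard-library analogue of that shape (std::mutex here stands in for art::Mutex):

#include <mutex>
#include <thread>
#include <vector>

void RunRacingTasks(int num_threads) {
  std::mutex setup_lock;
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back([&setup_lock] {
      {
        std::lock_guard<std::mutex> guard(setup_lock);
        // ...serialized: generate the shared artifact (dex2oat in the test)...
      }
      // ...concurrent: load the artifact and record which file was used...
    });
  }
  for (std::thread& t : threads) {
    t.join();
  }
}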
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index bcad4a3..b9e9d38 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -72,7 +72,6 @@
CHECK_NE(oat_file->Begin(), existing->Begin()) << "Oat file already mapped at that location";
}
}
- have_non_pic_oat_file_ = have_non_pic_oat_file_ || !oat_file->IsPic();
const OatFile* ret = oat_file.get();
oat_files_.insert(std::move(oat_file));
return ret;
@@ -85,7 +84,7 @@
auto it = oat_files_.find(compare);
CHECK(it != oat_files_.end());
oat_files_.erase(it);
- compare.release();
+ compare.release(); // NOLINT b/117926937
}
const OatFile* OatFileManager::FindOpenedOatFileFromDexLocation(
@@ -143,7 +142,7 @@
}
OatFileManager::OatFileManager()
- : have_non_pic_oat_file_(false), only_use_system_oat_files_(false) {}
+ : only_use_system_oat_files_(false) {}
OatFileManager::~OatFileManager() {
// Explicitly clear oat_files_ since the OatFile destructor calls back into OatFileManager for
@@ -152,7 +151,7 @@
}
std::vector<const OatFile*> OatFileManager::RegisterImageOatFiles(
- std::vector<gc::space::ImageSpace*> spaces) {
+ const std::vector<gc::space::ImageSpace*>& spaces) {
std::vector<const OatFile*> oat_files;
for (gc::space::ImageSpace* space : spaces) {
oat_files.push_back(RegisterOatFile(space->ReleaseOatFile()));
@@ -527,6 +526,8 @@
if (source_oat_file != nullptr) {
bool added_image_space = false;
if (source_oat_file->IsExecutable()) {
+ ScopedTrace app_image_timing("AppImage:Loading");
+
// We need to throw away the image space if we are debuggable but the oat-file source of the
// image is not otherwise we might get classes with inlined methods or other such things.
std::unique_ptr<gc::space::ImageSpace> image_space;
@@ -566,7 +567,7 @@
if (added_image_space) {
// Successfully added image space to heap, release the map so that it does not get
// freed.
- image_space.release();
+ image_space.release(); // NOLINT b/117926937
// Register for tracking.
for (const auto& dex_file : dex_files) {
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index 80456e9..7d96a7a 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -65,11 +65,6 @@
const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_base_location) const
REQUIRES(!Locks::oat_file_manager_lock_);
- // Returns true if we have a non pic oat file.
- bool HaveNonPicOatFile() const {
- return have_non_pic_oat_file_;
- }
-
// Returns the boot image oat files.
std::vector<const OatFile*> GetBootOatFiles() const;
@@ -78,7 +73,8 @@
// Returns the oat files for the images, registers the oat files.
// Takes ownership of the imagespace's underlying oat files.
- std::vector<const OatFile*> RegisterImageOatFiles(std::vector<gc::space::ImageSpace*> spaces)
+ std::vector<const OatFile*> RegisterImageOatFiles(
+ const std::vector<gc::space::ImageSpace*>& spaces)
REQUIRES(!Locks::oat_file_manager_lock_);
// Finds or creates the oat file holding dex_location. Then loads and returns
@@ -142,7 +138,6 @@
std::string* error_msg);
std::set<std::unique_ptr<const OatFile>> oat_files_ GUARDED_BY(Locks::oat_file_manager_lock_);
- bool have_non_pic_oat_file_;
// Only use the compiled code in an OAT file when the file is on /system. If the OAT file
// is not on /system, don't load it "executable".
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 51d8fca..b547113 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -74,14 +74,13 @@
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr);
@@ -102,14 +101,13 @@
// Ensure we can load that file. Just a precondition.
{
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location.c_str(),
oat_location.c_str(),
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
ASSERT_TRUE(odex_file != nullptr);
ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
@@ -119,14 +117,13 @@
Copy(GetTestDexFileName("MainUncompressed"), dex_location);
// And try to load again.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(/* zip_fd */ -1,
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
oat_location,
oat_location,
- /* requested_base */ nullptr,
- /* executable */ false,
- /* low_4gb */ false,
+ /*executable=*/ false,
+ /*low_4gb=*/ false,
dex_location.c_str(),
- /* reservation */ nullptr,
+ /*reservation=*/ nullptr,
&error_msg));
EXPECT_TRUE(odex_file == nullptr);
EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/obj_ptr-inl.h b/runtime/obj_ptr-inl.h
index f1e3b50..b949c96 100644
--- a/runtime/obj_ptr-inl.h
+++ b/runtime/obj_ptr-inl.h
@@ -24,18 +24,27 @@
namespace art {
template<class MirrorType>
+inline uintptr_t ObjPtr<MirrorType>::GetCurrentTrimedCookie() {
+ Thread* self = Thread::Current();
+ if (UNLIKELY(self == nullptr)) {
+ return kCookieMask;
+ }
+ return self->GetPoisonObjectCookie() & kCookieMask;
+}
+
+template<class MirrorType>
inline bool ObjPtr<MirrorType>::IsValid() const {
if (!kObjPtrPoisoning || IsNull()) {
return true;
}
- return GetCookie() == TrimCookie(Thread::Current()->GetPoisonObjectCookie());
+ return GetCookie() == GetCurrentTrimedCookie();
}
template<class MirrorType>
inline void ObjPtr<MirrorType>::AssertValid() const {
if (kObjPtrPoisoning) {
CHECK(IsValid()) << "Stale object pointer " << PtrUnchecked() << " , expected cookie "
- << TrimCookie(Thread::Current()->GetPoisonObjectCookie()) << " but got " << GetCookie();
+ << GetCurrentTrimedCookie() << " but got " << GetCookie();
}
}
@@ -47,9 +56,7 @@
DCHECK_LE(ref, 0xFFFFFFFFU);
ref >>= kObjectAlignmentShift;
// Put cookie in high bits.
- Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- ref |= self->GetPoisonObjectCookie() << kCookieShift;
+ ref |= GetCurrentTrimedCookie() << kCookieShift;
}
return ref;
}
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index e421d87..60e21ab 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -156,9 +156,7 @@
private:
// Trim off high bits of thread local cookie.
- ALWAYS_INLINE static uintptr_t TrimCookie(uintptr_t cookie) {
- return cookie & kCookieMask;
- }
+ ALWAYS_INLINE static uintptr_t GetCurrentTrimedCookie();
ALWAYS_INLINE uintptr_t GetCookie() const {
return reference_ >> kCookieShift;
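The obj_ptr changes above fold cookie trimming into GetCurrentTrimedCookie and tolerate a null Thread::Current(). For readers new to the scheme, here is a toy version of the poisoning layout, with the compressed reference in the low bits and a per-thread cookie in the high bits; the constants are illustrative, not ART's real values:

#include <cstdint>

constexpr unsigned kAlignShift = 3;        // 8-byte object alignment
constexpr unsigned kCookieShift = 32;      // cookie sits above the reference bits
constexpr uint64_t kCookieMask = 0xFFFF;   // bits of the cookie that are kept

uint64_t Encode(uint32_t ref, uint64_t cookie) {
  return (static_cast<uint64_t>(ref) >> kAlignShift) |
         ((cookie & kCookieMask) << kCookieShift);
}

bool IsValid(uint64_t encoded, uint64_t current_cookie) {
  // A stale handle carries an old cookie and fails this check loudly instead of
  // being silently dereferenced.
  return (encoded >> kCookieShift) == (current_cookie & kCookieMask);
}

uint32_t Decode(uint64_t encoded) {
  return static_cast<uint32_t>(encoded & ((uint64_t{1} << kCookieShift) - 1))
         << kAlignShift;
}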
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 4df9b27..372b821 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -27,14 +27,14 @@
// Allow the meaning of offsets to be strongly typed.
class Offset {
public:
- explicit Offset(size_t val) : val_(val) {}
- int32_t Int32Value() const {
+ constexpr explicit Offset(size_t val) : val_(val) {}
+ constexpr int32_t Int32Value() const {
return static_cast<int32_t>(val_);
}
- uint32_t Uint32Value() const {
+ constexpr uint32_t Uint32Value() const {
return static_cast<uint32_t>(val_);
}
- size_t SizeValue() const {
+ constexpr size_t SizeValue() const {
return val_;
}
@@ -46,7 +46,7 @@
// Offsets relative to the current frame.
class FrameOffset : public Offset {
public:
- explicit FrameOffset(size_t val) : Offset(val) {}
+ constexpr explicit FrameOffset(size_t val) : Offset(val) {}
bool operator>(FrameOffset other) const { return val_ > other.val_; }
bool operator<(FrameOffset other) const { return val_ < other.val_; }
};
@@ -55,7 +55,7 @@
template<PointerSize pointer_size>
class ThreadOffset : public Offset {
public:
- explicit ThreadOffset(size_t val) : Offset(val) {}
+ constexpr explicit ThreadOffset(size_t val) : Offset(val) {}
};
using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
@@ -64,7 +64,7 @@
// Offsets relative to an object.
class MemberOffset : public Offset {
public:
- explicit MemberOffset(size_t val) : Offset(val) {}
+ constexpr explicit MemberOffset(size_t val) : Offset(val) {}
};
} // namespace art
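Making the Offset constructors and accessors constexpr lets offset values flow into constant expressions. A reduced sketch of what that enables (the class here is trimmed down, not the full Offset hierarchy):

#include <cstddef>
#include <cstdint>

class Offset {
 public:
  constexpr explicit Offset(size_t val) : val_(val) {}
  constexpr uint32_t Uint32Value() const { return static_cast<uint32_t>(val_); }

 protected:
  size_t val_;
};

// With constexpr, an offset can be computed and checked at compile time.
constexpr Offset kExampleOffset(16);
static_assert(kExampleOffset.Uint32Value() == 16, "usable in constant expressions");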
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4d16eb5..2e495cc 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -751,7 +751,7 @@
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
- UsageMessage(stream, " -Xpatchoat:filename\n");
+ UsageMessage(stream, " -Xpatchoat:filename (obsolete, ignored)\n");
UsageMessage(stream, " -Xusejit:booleanvalue\n");
UsageMessage(stream, " -Xjitinitialsize:N\n");
UsageMessage(stream, " -Xjitmaxsize:N\n");
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7b92151..36a6b7f 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -165,7 +165,7 @@
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
size_t current_frame_depth = GetFrameDepth();
if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
@@ -402,6 +402,8 @@
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
+ VLOG(deopt) << "Deoptimizing stack: depth: " << GetFrameDepth()
+ << " at method " << ArtMethod::PrettyMethod(method);
if (method == nullptr || single_frame_done_) {
FinishStackWalk();
return false; // End stack walk.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index d62cbdb..45f5633 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -277,7 +277,7 @@
size_t identical;
SummaryElement() : equiv(0), identical(0) {}
- SummaryElement(SummaryElement&& ref) {
+ SummaryElement(SummaryElement&& ref) noexcept {
root = ref.root;
equiv = ref.equiv;
identical = ref.identical;
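The noexcept added to SummaryElement's move constructor matters because standard containers only move elements during reallocation when the move constructor cannot throw; otherwise they copy to preserve the strong exception guarantee. A minimal illustration:

#include <type_traits>
#include <utility>
#include <vector>

struct Element {
  std::vector<int> data;
  Element() = default;
  Element(const Element&) = default;
  Element& operator=(const Element&) = default;
  Element(Element&& other) noexcept : data(std::move(other.data)) {}
};

// std::vector<Element> will move rather than copy elements when it grows.
static_assert(std::is_nothrow_move_constructible<Element>::value,
              "move construction is declared non-throwing");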
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 424ee06..00e298e 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -33,7 +33,7 @@
// TODO: Convert to CommonRuntimeTest. Currently MakeExecutable is used.
class ReflectionTest : public CommonCompilerTest {
protected:
- virtual void SetUp() {
+ void SetUp() override {
CommonCompilerTest::SetUp();
vm_ = Runtime::Current()->GetJavaVM();
@@ -73,7 +73,7 @@
}
}
- virtual void TearDown() {
+ void TearDown() override {
CleanUpJniEnv();
CommonCompilerTest::TearDown();
}
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 374591e..bde0d11 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -22,6 +22,7 @@
#include "arch/instruction_set.h"
#include "art_method.h"
#include "base/callee_save_type.h"
+#include "base/casts.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
@@ -82,7 +83,7 @@
inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return reinterpret_cast<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
+ return reinterpret_cast64<ArtMethod*>(callee_save_methods_[static_cast<size_t>(type)]);
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6878cc0..4d77b9d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -49,7 +49,6 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "asm_support.h"
-#include "asm_support_check.h"
#include "base/aborting.h"
#include "base/arena_allocator.h"
#include "base/atomic.h"
@@ -86,7 +85,7 @@
#include "hidden_api.h"
#include "image-inl.h"
#include "instrumentation.h"
-#include "intern_table.h"
+#include "intern_table-inl.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
@@ -166,6 +165,11 @@
#include <android/set_abort_message.h>
#endif
+// Static asserts to check the values of generated assembly-support macros.
+#define ASM_DEFINE(NAME, EXPR) static_assert((NAME) == (EXPR), "Unexpected value of " #NAME);
+#include "asm_defines.def"
+#undef ASM_DEFINE
+
namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
@@ -257,6 +261,7 @@
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
async_exceptions_thrown_(false),
+ non_standard_exits_enabled_(false),
is_java_debuggable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
@@ -277,7 +282,6 @@
static_assert(Runtime::kCalleeSaveSize ==
static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
- CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
interpreter::CheckInterpreterAsmConstants();
callbacks_.reset(new RuntimeCallbacks());
@@ -695,15 +699,6 @@
return env->NewGlobalRef(system_class_loader.get());
}
-std::string Runtime::GetPatchoatExecutable() const {
- if (!patchoat_executable_.empty()) {
- return patchoat_executable_;
- }
- std::string patchoat_executable(GetAndroidRoot());
- patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
- return patchoat_executable;
-}
-
std::string Runtime::GetCompilerExecutable() const {
if (!compiler_executable_.empty()) {
return compiler_executable_;
@@ -1190,7 +1185,6 @@
properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
- patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
is_zygote_ = runtime_options.Exists(Opt::Zygote);
is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
@@ -1458,7 +1452,7 @@
CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
CHECK(self != nullptr);
- self->SetCanCallIntoJava(!IsAotCompiler());
+ self->SetIsRuntimeThread(IsAotCompiler());
// Set us to runnable so tools using a runtime can allocate and GC by default
self->TransitionFromSuspendedToRunnable();
@@ -1495,7 +1489,9 @@
}
{
ScopedTrace trace2("AddImageStringsToTable");
- GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
+ for (gc::space::ImageSpace* image_space : heap_->GetBootImageSpaces()) {
+ GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
+ }
}
if (IsJavaDebuggable()) {
// Now that we have loaded the boot image, deoptimize its methods if we are running
@@ -2499,7 +2495,7 @@
}
bool Runtime::CanRelocate() const {
- return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
+ return !IsAotCompiler();
}
bool Runtime::IsCompilingBootImage() const {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f0bf754..398a48d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -164,7 +164,6 @@
}
std::string GetCompilerExecutable() const;
- std::string GetPatchoatExecutable() const;
const std::vector<std::string>& GetCompilerOptions() const {
return compiler_options_;
@@ -400,7 +399,7 @@
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
- static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
+ static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
}
@@ -655,6 +654,14 @@
is_native_debuggable_ = value;
}
+ bool AreNonStandardExitsEnabled() const {
+ return non_standard_exits_enabled_;
+ }
+
+ void SetNonStandardExitsEnabled() {
+ non_standard_exits_enabled_ = true;
+ }
+
bool AreAsyncExceptionsThrown() const {
return async_exceptions_thrown_;
}
@@ -845,7 +852,6 @@
bool image_dex2oat_enabled_;
std::string compiler_executable_;
- std::string patchoat_executable_;
std::vector<std::string> compiler_options_;
std::vector<std::string> image_compiler_options_;
std::string image_location_;
@@ -963,7 +969,7 @@
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
// Whether or not the sig chain (and implicitly the fault handler) should be
- // disabled. Tools like dex2oat or patchoat don't need them. This enables
+ // disabled. Tools like dex2oat don't need them. This enables
// building a statically linked version of dex2oat.
bool no_sig_chain_;
@@ -988,6 +994,10 @@
// MterpShouldSwitchInterpreters function.
bool async_exceptions_thrown_;
+ // Whether anything is going to be using the shadow-frame APIs to force a function to return
+ // early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
+ bool non_standard_exits_enabled_;
+
// Whether Java code needs to be debuggable.
bool is_java_debuggable_;
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index aaedb23..89f3124 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -458,20 +458,20 @@
ref_ = { &k->GetDexFile(), k->GetDexClassDefIndex() };
}
- void MonitorContendedLocking(Monitor* mon ATTRIBUTE_UNUSED)
+ void MonitorContendedLocking(Monitor* mon ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) { }
- void MonitorContendedLocked(Monitor* mon ATTRIBUTE_UNUSED)
+ void MonitorContendedLocked(Monitor* mon ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) { }
- void ObjectWaitStart(Handle<mirror::Object> obj, int64_t millis ATTRIBUTE_UNUSED)
+ void ObjectWaitStart(Handle<mirror::Object> obj, int64_t millis ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsInterestingObject(obj.Get())) {
saw_wait_start_ = true;
}
}
- void MonitorWaitFinished(Monitor* m, bool timed_out ATTRIBUTE_UNUSED)
+ void MonitorWaitFinished(Monitor* m, bool timed_out ATTRIBUTE_UNUSED) override
REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsInterestingObject(m->GetObject())) {
saw_wait_finished_ = true;
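
These test hunks, like many later ones in this change, only add the override specifier to existing virtual overrides. The benefit is purely compile-time: a signature mismatch that would otherwise silently declare a brand-new virtual becomes a hard error. A self-contained illustration with invented names:

  struct MonitorCallbackBase {
    virtual ~MonitorCallbackBase() {}
    virtual void MonitorWaitFinished(void* monitor, bool timed_out) {}
  };

  struct BadCallback : MonitorCallbackBase {
    // Wrong parameter type: this silently adds a new virtual instead of
    // overriding the base method, so the base implementation keeps running.
    virtual void MonitorWaitFinished(void* monitor, int timed_out) {}
  };

  struct GoodCallback : MonitorCallbackBase {
    // With 'override', the same mistake fails to compile.
    void MonitorWaitFinished(void* monitor, bool timed_out) override {}
  };
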
diff --git a/runtime/scoped_thread_state_change.cc b/runtime/scoped_thread_state_change.cc
index edbce05..ae833b4 100644
--- a/runtime/scoped_thread_state_change.cc
+++ b/runtime/scoped_thread_state_change.cc
@@ -20,6 +20,7 @@
#include "base/casts.h"
#include "jni/java_vm_ext.h"
+#include "mirror/object-inl.h"
#include "obj_ptr-inl.h"
#include "runtime-inl.h"
diff --git a/runtime/stack.cc b/runtime/stack.cc
index eb9c661..25939d2 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -139,9 +139,9 @@
} else {
uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
uint32_t value = 0;
- bool success = GetVReg(m, reg, kReferenceVReg, &value);
- // We currently always guarantee the `this` object is live throughout the method.
- CHECK(success) << "Failed to read the this object in " << ArtMethod::PrettyMethod(m);
+ if (!GetVReg(m, reg, kReferenceVReg, &value)) {
+ return nullptr;
+ }
return reinterpret_cast<mirror::Object*>(value);
}
}
@@ -223,20 +223,39 @@
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
+ if (kind == kReferenceVReg && !stack_mask.LoadBit(offset / kFrameSlotSize)) {
+ return false;
+ }
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
- case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegister: {
+ uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+ uint32_t reg = dex_register_map[vreg].GetMachineRegister();
+ if (kind == kReferenceVReg && !(register_mask & (1 << reg))) {
+ return false;
+ }
+ return GetRegisterIfAccessible(reg, kind, val);
+ }
case DexRegisterLocation::Kind::kInRegisterHigh:
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
+ if (kind == kReferenceVReg) {
+ return false;
+ }
uint32_t reg = dex_register_map[vreg].GetMachineRegister();
return GetRegisterIfAccessible(reg, kind, val);
}
- case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map[vreg].GetConstant();
+ case DexRegisterLocation::Kind::kConstant: {
+ uint32_t result = dex_register_map[vreg].GetConstant();
+ if (kind == kReferenceVReg && result != 0) {
+ return false;
+ }
+ *val = result;
return true;
+ }
case DexRegisterLocation::Kind::kNone:
return false;
default:
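
The GetVReg() hunks above make reference reads fail softly instead of CHECK-failing: a kReferenceVReg value is only reported if the stack map's stack mask (for spill slots) or register mask (for core registers) actually marks that location as a GC root, and FP registers or nonzero constants are never treated as references. A small sketch of the two mask tests, using plain integers rather than ART's CodeInfo API:

  #include <cstdint>

  constexpr int32_t kFrameSlotSize = 4;  // the divisor used in the hunk above

  // True if the GC stack mask covers the slot at this (non-negative) frame offset.
  bool StackSlotHoldsReference(uint64_t stack_mask, int32_t byte_offset) {
    return ((stack_mask >> (byte_offset / kFrameSlotSize)) & 1u) != 0;
  }

  // True if the register mask says this machine register holds a reference.
  bool RegisterHoldsReference(uint32_t register_mask, uint32_t machine_reg) {
    return (register_mask & (1u << machine_reg)) != 0u;
  }
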
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index e40bca5..9bd135e 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -86,11 +86,11 @@
struct SubtypeCheckInfoTest : public ::testing::Test {
protected:
- virtual void SetUp() {
+ void SetUp() override {
android::base::InitLogging(/*argv*/nullptr);
}
- virtual void TearDown() {
+ void TearDown() override {
}
static SubtypeCheckInfo MakeSubtypeCheckInfo(BitString path_to_root = {},
@@ -131,7 +131,7 @@
// Create an SubtypeCheckInfo with the same depth, but with everything else reset.
// Returns: SubtypeCheckInfo in the Uninitialized state.
- static SubtypeCheckInfo CopyCleared(SubtypeCheckInfo sc) {
+ static SubtypeCheckInfo CopyCleared(const SubtypeCheckInfo& sc) {
SubtypeCheckInfo cleared_copy{};
cleared_copy.depth_ = sc.depth_;
DCHECK_EQ(SubtypeCheckInfo::kUninitialized, cleared_copy.GetState());
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index 666bf81..9aa3032 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -301,13 +301,13 @@
struct SubtypeCheckTest : public ::testing::Test {
protected:
- virtual void SetUp() {
+ void SetUp() override {
android::base::InitLogging(/*argv*/nullptr);
CreateRootedTree(BitString::kCapacity + 2u, BitString::kCapacity + 2u);
}
- virtual void TearDown() {
+ void TearDown() override {
}
void CreateRootedTree(size_t width, size_t height) {
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index 4e75a4f..289a1a4 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -22,8 +22,6 @@
namespace art {
// The various reasons that we might be suspending a thread.
-// TODO Once kForDebugger is removed by removing the old debugger we should make the kForUserCode
-// just a basic count for bookkeeping instead of linking it as directly with internal suspends.
enum class SuspendReason {
// Suspending for internal reasons (e.g. GC, stack trace, etc.).
// TODO Split this into more descriptive sections.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 19fe4ea..b3492e1 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -45,6 +45,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
@@ -500,7 +501,7 @@
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
ObjPtr<mirror::Object> thread_peer) {
ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
- Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
+ Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
// Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
// to stop it from going away.
if (kIsDebugBuild) {
@@ -728,7 +729,7 @@
// JNIEnvExt we created.
// Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
// between the threads.
- child_jni_env_ext.release();
+ child_jni_env_ext.release(); // NOLINT pthreads API.
return;
}
}
@@ -907,7 +908,7 @@
}
self->GetJniEnv()->SetLongField(thread_peer,
WellKnownClasses::java_lang_Thread_nativePeer,
- reinterpret_cast<jlong>(self));
+ reinterpret_cast64<jlong>(self));
return true;
};
return Attach(thread_name, as_daemon, set_peer_action);
@@ -949,8 +950,9 @@
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
- env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
- reinterpret_cast<jlong>(self));
+ env->SetLongField(peer.get(),
+ WellKnownClasses::java_lang_Thread_nativePeer,
+ reinterpret_cast64<jlong>(self));
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(self);
@@ -1236,34 +1238,6 @@
LOG(FATAL) << ss.str();
}
-void Thread::SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code) {
- CHECK_EQ(this, Thread::Current()) << "This function may only be called on the current thread. "
- << *Thread::Current() << " tried to modify the suspendability "
- << "of " << *this;
- // NB This checks the new value! This ensures that we can only set can_be_suspended_by_user_code
- // to false if !CanCallIntoJava().
- DCHECK(!CanCallIntoJava() || can_be_suspended_by_user_code)
- << "Threads able to call into java may not be marked as unsuspendable!";
- if (can_be_suspended_by_user_code == CanBeSuspendedByUserCode()) {
- // Don't need to do anything if nothing is changing.
- return;
- }
- art::MutexLock mu(this, *Locks::user_code_suspension_lock_);
- art::MutexLock thread_list_mu(this, *Locks::thread_suspend_count_lock_);
-
- // We want to add the user-code suspend count if we are newly allowing user-code suspends and
- // remove them if we are disabling them.
- int adj = can_be_suspended_by_user_code ? GetUserCodeSuspendCount() : -GetUserCodeSuspendCount();
- // Adjust the global suspend count appropriately. Use kInternal to not change the ForUserCode
- // count.
- if (adj != 0) {
- bool suspend = ModifySuspendCountInternal(this, adj, nullptr, SuspendReason::kInternal);
- CHECK(suspend) << this << " was unable to modify it's own suspend count!";
- }
- // Mark thread as accepting user-code suspensions.
- can_be_suspended_by_user_code_ = can_be_suspended_by_user_code;
-}
-
bool Thread::ModifySuspendCountInternal(Thread* self,
int delta,
AtomicInteger* suspend_barrier,
@@ -1285,17 +1259,6 @@
LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
return false;
}
- DCHECK(this == self || this->IsSuspended())
- << "Only self kForUserCode suspension on an unsuspended thread is allowed: " << this;
- if (UNLIKELY(!CanBeSuspendedByUserCode())) {
- VLOG(threads) << this << " is being requested to suspend for user code but that is disabled "
- << "the thread will not actually go to sleep.";
- // Having the user_code_suspend_count still be around is useful but we don't need to actually
- // do anything since we aren't going to 'really' suspend. Just adjust the
- // user_code_suspend_count and return.
- tls32_.user_code_suspend_count += delta;
- return true;
- }
}
if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
UnsafeLogFatalForSuspendCount(self, this);
@@ -2156,8 +2119,7 @@
Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
- can_call_into_java_(true),
- can_be_suspended_by_user_code_(true) {
+ is_runtime_thread_(false) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
@@ -2181,6 +2143,10 @@
tls32_.is_transitioning_to_runnable = false;
}
+bool Thread::CanLoadClasses() const {
+ return !IsRuntimeThread() || !Runtime::Current()->IsJavaDebuggable();
+}
+
bool Thread::IsStillStarting() const {
// You might think you can check whether the state is kStarting, but for much of thread startup,
// the thread is in kNative; it might also be in kVmWait.
@@ -2515,7 +2481,7 @@
saved_frames_(saved_frames),
max_saved_frames_(max_saved_frames) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to skip frames up to and including the exception's constructor.
// Note we also skip the frame if it doesn't have a method (namely the callee
// save frame)
@@ -2603,7 +2569,7 @@
self_->EndAssertNoThreadSuspension(nullptr);
}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
@@ -3147,8 +3113,10 @@
}
void Thread::ThrowOutOfMemoryError(const char* msg) {
- LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
- msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
+ LOG(WARNING) << "Throwing OutOfMemoryError "
+ << '"' << msg << '"'
+ << " (VmSize " << GetProcessStatus("VmSize")
+ << (tls32_.throwing_OutOfMemoryError ? ", recursive case)" : ")");
if (!tls32_.throwing_OutOfMemoryError) {
tls32_.throwing_OutOfMemoryError = true;
ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
@@ -3402,11 +3370,41 @@
HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
instrumentation->ExceptionThrownEvent(this, exception.Ptr());
}
- // Does instrumentation need to deoptimize the stack?
- // Note: we do this *after* reporting the exception to instrumentation in case it
- // now requires deoptimization. It may happen if a debugger is attached and requests
- // new events (single-step, breakpoint, ...) when the exception is reported.
- if (Dbg::IsForcedInterpreterNeededForException(this)) {
+ // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
+ // Note: we do this *after* reporting the exception to instrumentation in case it now requires
+ // deoptimization. It may happen if a debugger is attached and requests new events (single-step,
+ // breakpoint, ...) when the exception is reported.
+ //
+ // Note we need to check for both force_frame_pop and force_retry_instruction. The first is
+ // expected to happen fairly regularly but the second can only happen if we are using
+ // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later
+ // and see every frame being popped. We don't need to handle it any differently.
+ ShadowFrame* cf;
+ bool force_deopt;
+ {
+ NthCallerVisitor visitor(this, 0, false);
+ visitor.WalkStack();
+ cf = visitor.GetCurrentShadowFrame();
+ if (cf == nullptr) {
+ cf = FindDebuggerShadowFrame(visitor.GetFrameId());
+ }
+ bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame();
+ bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction();
+ if (kIsDebugBuild && force_frame_pop) {
+ NthCallerVisitor penultimate_visitor(this, 1, false);
+ penultimate_visitor.WalkStack();
+ ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame();
+ if (penultimate_frame == nullptr) {
+ penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId());
+ }
+ DCHECK(penultimate_frame != nullptr &&
+ penultimate_frame->GetForceRetryInstruction())
+ << "Force pop frame without retry instruction found. penultimate frame is null: "
+ << (penultimate_frame == nullptr ? "true" : "false");
+ }
+ force_deopt = force_frame_pop || force_retry_instr;
+ }
+ if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
@@ -3414,10 +3412,18 @@
const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
// Save the exception into the deoptimization context so it can be restored
// before entering the interpreter.
+ if (force_deopt) {
+ VLOG(deopt) << "Deopting " << cf->GetMethod()->PrettyMethod() << " for frame-pop";
+ DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+ // Get rid of the exception since we are doing a framepop instead.
+ LOG(WARNING) << "Suppressing pending exception for retry-instruction/frame-pop: "
+ << exception->Dump();
+ ClearException();
+ }
PushDeoptimizationContext(
JValue(),
false /* is_reference */,
- exception,
+ (force_deopt ? nullptr : exception),
false /* from_code */,
method_type);
artDeoptimize(this);
@@ -3456,45 +3462,37 @@
return result;
}
-// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
-// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
-struct CurrentMethodVisitor final : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread,
- context,
- StackVisitor::StackWalkKind::kIncludeInlinedFrames,
- check_suspended),
- this_object_(nullptr),
- method_(nullptr),
- dex_pc_(0),
- abort_on_error_(abort_on_error) {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m->IsRuntimeMethod()) {
- // Continue if this is a runtime method.
- return true;
- }
- if (context_ != nullptr) {
- this_object_ = GetThisObject();
- }
- method_ = m;
- dex_pc_ = GetDexPc(abort_on_error_);
- return false;
- }
- ObjPtr<mirror::Object> this_object_;
- ArtMethod* method_;
- uint32_t dex_pc_;
- const bool abort_on_error_;
-};
-
ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
bool check_suspended,
bool abort_on_error) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this),
- nullptr,
- check_suspended,
- abort_on_error);
+ // Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
+ // so we don't abort in a special situation (thinlocked monitor) when dumping the Java
+ // stack.
+ struct CurrentMethodVisitor final : public StackVisitor {
+ CurrentMethodVisitor(Thread* thread, bool check_suspended, bool abort_on_error)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ : StackVisitor(thread,
+ /* context= */nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
+ method_(nullptr),
+ dex_pc_(0),
+ abort_on_error_(abort_on_error) {}
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (m->IsRuntimeMethod()) {
+ // Continue if this is a runtime method.
+ return true;
+ }
+ method_ = m;
+ dex_pc_ = GetDexPc(abort_on_error_);
+ return false;
+ }
+ ArtMethod* method_;
+ uint32_t dex_pc_;
+ const bool abort_on_error_;
+ };
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this), check_suspended, abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -3520,7 +3518,7 @@
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
visitor_(visitor) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
<< StringPrintf("@ PC:%04x", GetDexPc());
@@ -3609,8 +3607,8 @@
if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
DCHECK(method_header->IsOptimized());
- StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
- reinterpret_cast<uintptr_t>(cur_quick_frame));
+ StackReference<mirror::Object>* vreg_base =
+ reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
CodeInfo code_info(method_header, kPrecise
? CodeInfo::DecodeFlags::AllTables // We will need dex register maps.
@@ -4076,4 +4074,13 @@
UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
}
+void Thread::ClearAllInterpreterCaches() {
+ static struct ClearInterpreterCacheClosure : Closure {
+ virtual void Run(Thread* thread) {
+ thread->GetInterpreterCache()->Clear(thread);
+ }
+ } closure;
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+}
+
} // namespace art
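
reinterpret_cast64<> (declared in base/casts.h, hence the new include) packages the pointer/jlong widening that was previously spelled with nested casts around the java.lang.Thread.nativePeer field. An equivalent round trip in standard C++, with jlong stubbed out as int64_t for the sketch:

  #include <cstdint>

  using jlong = int64_t;  // stand-in for the JNI typedef
  class Thread;           // opaque here

  jlong EncodeNativePeer(Thread* self) {
    return static_cast<jlong>(reinterpret_cast<uintptr_t>(self));
  }

  Thread* DecodeNativePeer(jlong peer) {
    return reinterpret_cast<Thread*>(static_cast<uintptr_t>(peer));
  }
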
diff --git a/runtime/thread.h b/runtime/thread.h
index d169a62..d7dc5ae 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -38,6 +38,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "handle_scope.h"
#include "instrumentation.h"
+#include "interpreter/interpreter_cache.h"
#include "jvalue.h"
#include "managed_stack.h"
#include "offsets.h"
@@ -650,28 +651,28 @@
//
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThinLockIdOffset() {
+ static constexpr ThreadOffset<pointer_size> ThinLockIdOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> InterruptedOffset() {
+ static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, interrupted));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadFlagsOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadFlagsOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> IsGcMarkingOffset() {
+ static constexpr ThreadOffset<pointer_size> IsGcMarkingOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
@@ -686,21 +687,12 @@
private:
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
+ static constexpr ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
- size_t scale;
- size_t shrink;
- if (pointer_size == kRuntimePointerSize) {
- scale = 1;
- shrink = 1;
- } else if (pointer_size > kRuntimePointerSize) {
- scale = static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize);
- shrink = 1;
- } else {
- DCHECK_GT(kRuntimePointerSize, pointer_size);
- scale = 1;
- shrink = static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size);
- }
+ size_t scale = (pointer_size > kRuntimePointerSize) ?
+ static_cast<size_t>(pointer_size) / static_cast<size_t>(kRuntimePointerSize) : 1;
+ size_t shrink = (kRuntimePointerSize > pointer_size) ?
+ static_cast<size_t>(kRuntimePointerSize) / static_cast<size_t>(pointer_size) : 1;
return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
}
@@ -740,82 +732,70 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> SelfOffset() {
+ static constexpr ThreadOffset<pointer_size> SelfOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
+ static constexpr ThreadOffset<pointer_size> MterpCurrentIBaseOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_current_ibase));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpDefaultIBaseOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_default_ibase));
- }
-
- template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> MterpAltIBaseOffset() {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, mterp_alt_ibase));
- }
-
- template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ExceptionOffset() {
+ static constexpr ThreadOffset<pointer_size> ExceptionOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> PeerOffset() {
+ static constexpr ThreadOffset<pointer_size> PeerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> CardTableOffset() {
+ static constexpr ThreadOffset<pointer_size> CardTableOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalPosOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_pos));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_end));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_objects));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+ static constexpr ThreadOffset<pointer_size> RosAllocRunsOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
rosalloc_runs));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_top));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+ static constexpr ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
thread_local_alloc_stack_end));
}
@@ -858,19 +838,19 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> StackEndOffset() {
+ static constexpr ThreadOffset<pointer_size> StackEndOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> JniEnvOffset() {
+ static constexpr ThreadOffset<pointer_size> JniEnvOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
+ static constexpr ThreadOffset<pointer_size> TopOfManagedStackOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
ManagedStack::TaggedTopQuickFrameOffset());
@@ -892,7 +872,7 @@
ALWAYS_INLINE ShadowFrame* PopShadowFrame();
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopShadowFrameOffset() {
+ static constexpr ThreadOffset<pointer_size> TopShadowFrameOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
ManagedStack::TopShadowFrameOffset());
@@ -921,7 +901,7 @@
}
template<PointerSize pointer_size>
- static ThreadOffset<pointer_size> TopHandleScopeOffset() {
+ static constexpr ThreadOffset<pointer_size> TopHandleScopeOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
top_handle_scope));
}
@@ -989,25 +969,17 @@
--tls32_.disable_thread_flip_count;
}
- // Returns true if the thread is subject to user_code_suspensions.
- bool CanBeSuspendedByUserCode() const {
- return can_be_suspended_by_user_code_;
+ // Returns true if the thread is a runtime thread (eg from a ThreadPool).
+ bool IsRuntimeThread() const {
+ return is_runtime_thread_;
}
- // Sets CanBeSuspenededByUserCode and adjusts the suspend-count as needed. This may only be called
- // when running on the current thread. It is **absolutely required** that this be called only on
- // the Thread::Current() thread.
- void SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code)
- REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::user_code_suspension_lock_);
-
- // Returns true if the thread is allowed to call into java.
- bool CanCallIntoJava() const {
- return can_call_into_java_;
+ void SetIsRuntimeThread(bool is_runtime_thread) {
+ is_runtime_thread_ = is_runtime_thread;
}
- void SetCanCallIntoJava(bool can_call_into_java) {
- can_call_into_java_ = can_call_into_java;
- }
+ // Returns true if the thread is allowed to load java classes.
+ bool CanLoadClasses() const;
// Activates single step control for debugging. The thread takes the
// ownership of the given SingleStepControl*. It is deleted by a call
@@ -1215,30 +1187,14 @@
bool ProtectStack(bool fatal_on_error = true);
bool UnprotectStack();
- void SetMterpDefaultIBase(void* ibase) {
- tlsPtr_.mterp_default_ibase = ibase;
- }
-
void SetMterpCurrentIBase(void* ibase) {
tlsPtr_.mterp_current_ibase = ibase;
}
- void SetMterpAltIBase(void* ibase) {
- tlsPtr_.mterp_alt_ibase = ibase;
- }
-
- const void* GetMterpDefaultIBase() const {
- return tlsPtr_.mterp_default_ibase;
- }
-
const void* GetMterpCurrentIBase() const {
return tlsPtr_.mterp_current_ibase;
}
- const void* GetMterpAltIBase() const {
- return tlsPtr_.mterp_alt_ibase;
- }
-
bool HandlingSignal() const {
return tls32_.handling_signal_;
}
@@ -1299,6 +1255,29 @@
jobject thread_group)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE InterpreterCache* GetInterpreterCache() {
+ return &interpreter_cache_;
+ }
+
+ // Clear all thread-local interpreter caches.
+ //
+ // Since the caches are keyed by memory pointer to dex instructions, this must be
+ // called when any dex code is unloaded (before different code gets loaded at the
+ // same memory location).
+ //
+ // If presence of cache entry implies some pre-conditions, this must also be
+ // called if the pre-conditions might no longer hold true.
+ static void ClearAllInterpreterCaches();
+
+ template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> InterpreterCacheOffset() {
+ return ThreadOffset<pointer_size>(OFFSETOF_MEMBER(Thread, interpreter_cache_));
+ }
+
+ static constexpr int InterpreterCacheSizeLog2() {
+ return WhichPowerOf2(InterpreterCache::kSize);
+ }
+
private:
explicit Thread(bool daemon);
~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
@@ -1563,9 +1542,8 @@
// critical section enter.
uint32_t disable_thread_flip_count;
- // If CanBeSuspendedByUserCode, how much of 'suspend_count_' is by request of user code, used to
- // distinguish threads suspended by the runtime from those suspended by user code. Otherwise
- // this is just a count of how many user-code suspends have been attempted (but were ignored).
+ // How much of 'suspend_count_' is by request of user code, used to distinguish threads
+ // suspended by the runtime from those suspended by user code.
// This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
@@ -1593,8 +1571,7 @@
last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
thread_local_limit(nullptr),
- thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
- mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
+ thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr),
flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
async_exception(nullptr) {
@@ -1731,10 +1708,8 @@
JniEntryPoints jni_entrypoints;
QuickEntryPoints quick_entrypoints;
- // Mterp jump table bases.
+ // Mterp jump table base.
void* mterp_current_ibase;
- void* mterp_default_ibase;
- void* mterp_alt_ibase;
// There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
void* rosalloc_runs[kNumRosAllocThreadLocalSizeBracketsInThread];
@@ -1759,6 +1734,14 @@
mirror::Throwable* async_exception;
} tlsPtr_;
+ // Small thread-local cache to be used from the interpreter.
+ // It is keyed by dex instruction pointer.
+ // The value is opcode-dependent (e.g. field offset).
+ InterpreterCache interpreter_cache_;
+
+ // All fields below this line should not be accessed by native code. This means these fields can
+ // be modified, rearranged, added or removed without having to modify asm_support.h
+
// Guards the 'wait_monitor_' members.
Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -1780,13 +1763,8 @@
// compiled code or entrypoints.
SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
- // True if the thread is allowed to call back into java (for e.g. during class resolution).
- // By default this is true.
- bool can_call_into_java_;
-
- // True if the thread is subject to user-code suspension. By default this is true. This can only
- // be false for threads where '!can_call_into_java_'.
- bool can_be_suspended_by_user_code_;
+ // True if the thread is some form of runtime thread (ex, GC or JIT).
+ bool is_runtime_thread_;
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
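
interpreter_cache_ is a small per-thread cache keyed by dex instruction pointer, sized to a power of two so InterpreterCacheSizeLog2() is well defined and assembly can index it with a shift and a mask; it is placed above the new "not accessed by native code" marker because assembly reaches it through InterpreterCacheOffset(). The real InterpreterCache lives in interpreter/interpreter_cache.h and is not shown in this diff; the following is only a direct-mapped sketch of that general shape, with invented names:

  #include <array>
  #include <cstdint>
  #include <utility>

  class TinyInterpreterCache {
   public:
    static constexpr size_t kSize = 256;  // must stay a power of two

    void Store(const void* dex_pc, size_t value) {
      entries_[IndexOf(dex_pc)] = {dex_pc, value};
    }

    bool Lookup(const void* dex_pc, size_t* value) const {
      const Entry& entry = entries_[IndexOf(dex_pc)];
      if (entry.first != dex_pc) {
        return false;  // slot is empty or holds a different instruction
      }
      *value = entry.second;
      return true;
    }

    void Clear() { entries_.fill({nullptr, 0u}); }

   private:
    using Entry = std::pair<const void*, size_t>;

    static size_t IndexOf(const void* dex_pc) {
      // Dex code units are 2-byte aligned, so drop the low bit before masking.
      return (reinterpret_cast<uintptr_t>(dex_pc) >> 1) & (kSize - 1);
    }

    std::array<Entry, kSize> entries_{};
  };
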
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index cddc275..ec40716 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -902,8 +902,6 @@
bool request_suspension,
SuspendReason reason,
bool* timed_out) {
- CHECK_NE(reason, SuspendReason::kForUserCode) << "Cannot suspend for user-code by peer. Must be "
- << "done directly on the thread.";
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
*timed_out = false;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 28fc59c..f1c808b 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -102,15 +102,10 @@
nullptr,
worker->thread_pool_->create_peers_));
worker->thread_ = Thread::Current();
- // Thread pool workers cannot call into java.
- worker->thread_->SetCanCallIntoJava(false);
- // Thread pool workers should not be getting paused by user-code.
- worker->thread_->SetCanBeSuspendedByUserCode(false);
+ // Mark thread pool workers as runtime-threads.
+ worker->thread_->SetIsRuntimeThread(true);
// Do work until its time to shut down.
worker->Run();
- // Thread pool worker is finished. We want to allow suspension during shutdown.
- worker->thread_->SetCanBeSuspendedByUserCode(true);
- // Thread shuts down.
runtime->DetachCurrentThread();
return nullptr;
}
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index d784200..2600f55 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -29,7 +29,7 @@
public:
explicit CountTask(AtomicInteger* count) : count_(count), verbose_(false) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
if (verbose_) {
LOG(INFO) << "Running: " << *self;
}
@@ -39,7 +39,7 @@
++*count_;
}
- void Finalize() {
+ void Finalize() override {
if (verbose_) {
LOG(INFO) << "Finalizing: " << *Thread::Current();
}
@@ -129,7 +129,7 @@
count_(count),
depth_(depth) {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
if (depth_ > 1) {
thread_pool_->AddTask(self, new TreeTask(thread_pool_, count_, depth_ - 1));
thread_pool_->AddTask(self, new TreeTask(thread_pool_, count_, depth_ - 1));
@@ -138,7 +138,7 @@
++*count_;
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
@@ -164,12 +164,12 @@
public:
PeerTask() {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
CHECK(self->GetPeer() != nullptr);
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
};
@@ -178,12 +178,12 @@
public:
NoPeerTask() {}
- void Run(Thread* self) {
+ void Run(Thread* self) override {
ScopedObjectAccess soa(self);
CHECK(self->GetPeer() == nullptr);
}
- void Finalize() {
+ void Finalize() override {
delete this;
}
};
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 608f0ee..97c39bb 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -134,7 +134,9 @@
}
if (needs_native_bridge) {
// TODO: Consider support?
- android::CloseNativeLibrary(dlopen_handle, needs_native_bridge);
+ // The result of this call and error_msg are ignored because the most
+ // relevant error is that native bridge is unsupported.
+ android::CloseNativeLibrary(dlopen_handle, needs_native_bridge, error_msg);
*error_msg = StringPrintf("Native-bridge agents unsupported: %s", name_.c_str());
*error = kLoadingError;
return nullptr;
@@ -174,7 +176,7 @@
}
}
-Agent::Agent(Agent&& other)
+Agent::Agent(Agent&& other) noexcept
: dlopen_handle_(nullptr),
onload_(nullptr),
onattach_(nullptr),
@@ -182,7 +184,7 @@
*this = std::move(other);
}
-Agent& Agent::operator=(Agent&& other) {
+Agent& Agent::operator=(Agent&& other) noexcept {
if (this != &other) {
if (dlopen_handle_ != nullptr) {
Unload();
diff --git a/runtime/ti/agent.h b/runtime/ti/agent.h
index 24a6f1c..faf76a1 100644
--- a/runtime/ti/agent.h
+++ b/runtime/ti/agent.h
@@ -105,8 +105,8 @@
// TODO We need to acquire some locks probably.
void Unload();
- Agent(Agent&& other);
- Agent& operator=(Agent&& other);
+ Agent(Agent&& other) noexcept;
+ Agent& operator=(Agent&& other) noexcept;
~Agent();
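
Agent's move constructor and move assignment are now declared noexcept. Beyond documenting intent, this matters when agents are held in standard containers: std::vector only moves its elements during reallocation if the move constructor cannot throw, and otherwise falls back to copying where a copy constructor exists. A standalone illustration of that rule, unrelated to the Agent class itself:

  #include <type_traits>

  struct ThrowingMove {
    ThrowingMove() = default;
    ThrowingMove(ThrowingMove&&) {}  // not noexcept
  };

  struct NoexceptMove {
    NoexceptMove() = default;
    NoexceptMove(NoexceptMove&&) noexcept {}
  };

  static_assert(!std::is_nothrow_move_constructible<ThrowingMove>::value,
                "vector reallocation cannot rely on moving this type");
  static_assert(std::is_nothrow_move_constructible<NoexceptMove>::value,
                "vector reallocation will move, not copy, this type");
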
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 1986eec..4ee983d 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -64,7 +64,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_trace_(Trace::AllocStackTrace()) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime frames (in particular callee save).
if (!m->IsRuntimeMethod()) {
@@ -383,9 +383,6 @@
}
};
std::unique_ptr<File, decltype(deleter)> trace_file(trace_file_in.release(), deleter);
- if (trace_file != nullptr) {
- trace_file->DisableAutoClose();
- }
Thread* self = Thread::Current();
{
@@ -420,7 +417,7 @@
if (the_trace_ != nullptr) {
LOG(ERROR) << "Trace already in progress, ignoring this request";
} else {
- enable_stats = (flags && kTraceCountAllocs) != 0;
+ enable_stats = (flags & kTraceCountAllocs) != 0;
the_trace_ = new Trace(trace_file.release(), buffer_size, flags, output_mode, trace_mode);
if (trace_mode == TraceMode::kSampling) {
CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
@@ -608,7 +605,7 @@
Runtime* runtime = Runtime::Current();
// Enable count of allocs if specified in the flags.
- bool enable_stats = (the_trace->flags_ && kTraceCountAllocs) != 0;
+ bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
{
gc::ScopedGCCriticalSection gcs(self,
@@ -891,15 +888,6 @@
LOG(ERROR) << "Unexpected branch event in tracing" << ArtMethod::PrettyMethod(method);
}
-void Trace::InvokeVirtualOrInterface(Thread*,
- Handle<mirror::Object>,
- ArtMethod* method,
- uint32_t dex_pc,
- ArtMethod*) {
- LOG(ERROR) << "Unexpected invoke event in tracing" << ArtMethod::PrettyMethod(method)
- << " " << dex_pc;
-}
-
void Trace::WatchedFramePop(Thread* self ATTRIBUTE_UNUSED,
const ShadowFrame& frame ATTRIBUTE_UNUSED) {
LOG(ERROR) << "Unexpected WatchedFramePop event in tracing";
@@ -1127,7 +1115,7 @@
void Trace::DumpThreadList(std::ostream& os) {
Thread* self = Thread::Current();
- for (auto it : exited_threads_) {
+ for (const auto& it : exited_threads_) {
os << it.first << "\t" << it.second << "\n";
}
Locks::thread_list_lock_->AssertNotHeld(self);
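
Two of the hunks above replace a logical && with a bitwise & when testing kTraceCountAllocs. With &&, any nonzero flags value paired with the nonzero constant evaluates to true, so allocation counting was enabled whenever any trace flag was set. A short illustration (the constant's bit value here is assumed; only its name comes from the code above):

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t kTraceCountAllocs = 0x01;   // assumed value, for illustration
  constexpr uint32_t kSomeOtherTraceFlag = 0x02;

  int main() {
    uint32_t flags = kSomeOtherTraceFlag;  // allocation counting NOT requested
    bool broken = (flags && kTraceCountAllocs) != 0;  // true: both operands nonzero
    bool fixed = (flags & kTraceCountAllocs) != 0;    // false: the bit is not set
    assert(broken);
    assert(!fixed);
    return 0;
  }
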
diff --git a/runtime/trace.h b/runtime/trace.h
index 5d96493..926a34f 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -224,12 +224,6 @@
uint32_t dex_pc,
int32_t dex_pc_offset)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
- void InvokeVirtualOrInterface(Thread* thread,
- Handle<mirror::Object> this_object,
- ArtMethod* caller,
- uint32_t dex_pc,
- ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) override;
void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
REQUIRES_SHARED(Locks::mutator_lock_) override;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index c9766bc..1e5b2bb 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -320,7 +320,7 @@
void Transaction::VisitObjectLogs(RootVisitor* visitor) {
// List of moving roots.
- typedef std::pair<mirror::Object*, mirror::Object*> ObjectPair;
+ using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
std::list<ObjectPair> moving_roots;
// Visit roots.
@@ -348,7 +348,7 @@
void Transaction::VisitArrayLogs(RootVisitor* visitor) {
// List of moving roots.
- typedef std::pair<mirror::Array*, mirror::Array*> ArrayPair;
+ using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
std::list<ArrayPair> moving_roots;
for (auto& it : array_logs_) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a1b8938..5fce892 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -149,6 +149,7 @@
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
+ uint32_t api_level,
std::string* error) {
if (klass->IsVerified()) {
return FailureKind::kNoFailure;
@@ -188,6 +189,7 @@
callbacks,
allow_soft_failures,
log_level,
+ api_level,
error);
}
@@ -211,6 +213,7 @@
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
+ uint32_t api_level,
std::string* error) {
// A class must not be abstract and final.
if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
@@ -261,6 +264,7 @@
allow_soft_failures,
log_level,
/*need_precise_constants*/ false,
+ api_level,
&hard_failure_msg);
if (result.kind == FailureKind::kHardFailure) {
if (failure_data.kind == FailureKind::kHardFailure) {
@@ -322,6 +326,7 @@
bool allow_soft_failures,
HardFailLogMode log_level,
bool need_precise_constants,
+ uint32_t api_level,
std::string* hard_failure_msg) {
MethodVerifier::FailureData result;
uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
@@ -339,7 +344,8 @@
allow_soft_failures,
need_precise_constants,
false /* verify to dump */,
- true /* allow_thread_suspension */);
+ true /* allow_thread_suspension */,
+ api_level);
if (verifier.Verify()) {
// Verification completed, however failures may be pending that didn't cause the verification
// to hard fail.
@@ -458,7 +464,8 @@
const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
ArtMethod* method,
- uint32_t method_access_flags) {
+ uint32_t method_access_flags,
+ uint32_t api_level) {
MethodVerifier* verifier = new MethodVerifier(self,
dex_file,
dex_cache,
@@ -472,7 +479,8 @@
true /* allow_soft_failures */,
true /* need_precise_constants */,
true /* verify_to_dump */,
- true /* allow_thread_suspension */);
+ true /* allow_thread_suspension */,
+ api_level);
verifier->Verify();
verifier->DumpFailures(vios->Stream());
vios->Stream() << verifier->info_messages_.str();
@@ -500,7 +508,8 @@
bool allow_soft_failures,
bool need_precise_constants,
bool verify_to_dump,
- bool allow_thread_suspension)
+ bool allow_thread_suspension,
+ uint32_t api_level)
: self_(self),
arena_stack_(Runtime::Current()->GetArenaPool()),
allocator_(&arena_stack_),
@@ -534,7 +543,8 @@
verify_to_dump_(verify_to_dump),
allow_thread_suspension_(allow_thread_suspension),
is_constructor_(false),
- link_(nullptr) {
+ link_(nullptr),
+ api_level_(api_level == 0 ? std::numeric_limits<uint32_t>::max() : api_level) {
self->PushVerifier(this);
}
@@ -546,7 +556,8 @@
void MethodVerifier::FindLocksAtDexPc(
ArtMethod* m,
uint32_t dex_pc,
- std::vector<MethodVerifier::DexLockInfo>* monitor_enter_dex_pcs) {
+ std::vector<MethodVerifier::DexLockInfo>* monitor_enter_dex_pcs,
+ uint32_t api_level) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
@@ -563,7 +574,8 @@
true /* allow_soft_failures */,
false /* need_precise_constants */,
false /* verify_to_dump */,
- false /* allow_thread_suspension */);
+ false /* allow_thread_suspension */,
+ api_level);
verifier.interesting_dex_pc_ = dex_pc;
verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
verifier.FindLocksAtDexPc();
@@ -3663,9 +3675,11 @@
// the access-checks interpreter. If result is primitive, skip the access check.
//
// Note: we do this for unresolved classes to trigger re-verification at runtime.
- if (C == CheckAccess::kYes && result->IsNonZeroReferenceTypes()) {
+ if (C == CheckAccess::kYes &&
+ result->IsNonZeroReferenceTypes() &&
+ (api_level_ >= 28u || !result->IsUnresolvedTypes())) {
const RegType& referrer = GetDeclaringClass();
- if (!referrer.CanAccess(*result)) {
+ if ((api_level_ >= 28u || !referrer.IsUnresolvedTypes()) && !referrer.CanAccess(*result)) {
Fail(VERIFY_ERROR_ACCESS_CLASS) << "(possibly) illegal class access: '"
<< referrer << "' -> '" << *result << "'";
}
@@ -4548,7 +4562,7 @@
}
if (klass_type.IsUnresolvedTypes()) {
// Accessibility checks depend on resolved fields.
- DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty());
+ DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty() || api_level_ < 28u);
return nullptr; // Can't resolve Class so no more to do here, will do checking at runtime.
}
@@ -4589,7 +4603,7 @@
}
if (klass_type.IsUnresolvedTypes()) {
// Accessibility checks depend on resolved fields.
- DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty());
+ DCHECK(klass_type.Equals(GetDeclaringClass()) || !failures_.empty() || api_level_ < 28u);
return nullptr; // Can't resolve Class so no more to do here
}
@@ -4725,7 +4739,7 @@
DCHECK(!can_load_classes_ || self_->IsExceptionPending());
self_->ClearException();
}
- } else {
+ } else if (api_level_ >= 28u) {
// If we don't have the field (it seems we failed resolution) and this is a PUT, we need to
// redo verification at runtime as the field may be final, unless the field id shows it's in
// the same class.
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 9890af9..eef2280 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -100,6 +100,7 @@
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
+ uint32_t api_level,
std::string* error)
REQUIRES_SHARED(Locks::mutator_lock_);
static FailureKind VerifyClass(Thread* self,
@@ -110,6 +111,7 @@
CompilerCallbacks* callbacks,
bool allow_soft_failures,
HardFailLogMode log_level,
+ uint32_t api_level,
std::string* error)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -121,7 +123,8 @@
Handle<mirror::ClassLoader> class_loader,
const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item, ArtMethod* method,
- uint32_t method_access_flags)
+ uint32_t method_access_flags,
+ uint32_t api_level)
REQUIRES_SHARED(Locks::mutator_lock_);
uint8_t EncodePcToReferenceMapData() const;
@@ -163,8 +166,10 @@
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
// Note: this is the only situation where the verifier will visit quickened instructions.
- static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
- std::vector<DexLockInfo>* monitor_enter_dex_pcs)
+ static void FindLocksAtDexPc(ArtMethod* m,
+ uint32_t dex_pc,
+ std::vector<DexLockInfo>* monitor_enter_dex_pcs,
+ uint32_t api_level)
REQUIRES_SHARED(Locks::mutator_lock_);
static void Init() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -242,7 +247,8 @@
bool allow_soft_failures,
bool need_precise_constants,
bool verify_to_dump,
- bool allow_thread_suspension)
+ bool allow_thread_suspension,
+ uint32_t api_level)
REQUIRES_SHARED(Locks::mutator_lock_);
void UninstantiableError(const char* descriptor);
@@ -299,6 +305,7 @@
bool allow_soft_failures,
HardFailLogMode log_level,
bool need_precise_constants,
+ uint32_t api_level,
std::string* hard_failure_msg)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -790,6 +797,10 @@
// Link, for the method verifier root linked list.
MethodVerifier* link_;
+ // API level, for dependent checks. Note: we do not use '0' for unset here, to simplify checks.
+ // Instead, unset level should correspond to max().
+ const uint32_t api_level_;
+
friend class art::Thread;
friend class VerifierDepsTest;
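
The verifier now threads an api_level through every entry point, with 0 meaning "unset"; the constructor normalizes that to max() so the relaxed pre-P (API 28) paths above are only taken when a real, older target level is supplied. A tiny sketch of the normalization and how the comparisons then read:

  #include <cstdint>
  #include <limits>

  // Mirrors the initializer in the MethodVerifier constructor above.
  inline uint32_t NormalizeApiLevel(uint32_t api_level) {
    return api_level == 0u ? std::numeric_limits<uint32_t>::max() : api_level;
  }

  // NormalizeApiLevel(0)  >= 28u  -> true  : unset level keeps the strict checks
  // NormalizeApiLevel(27) >= 28u  -> false : old targets get the lenient behavior
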
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index d1be9fa..cedc583 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -42,7 +42,7 @@
// Verify the class
std::string error_msg;
FailureKind failure = MethodVerifier::VerifyClass(
- self, klass, nullptr, true, HardFailLogMode::kLogWarning, &error_msg);
+ self, klass, nullptr, true, HardFailLogMode::kLogWarning, /* api_level */ 0u, &error_msg);
if (android::base::StartsWith(descriptor, "Ljava/lang/invoke")) {
ASSERT_TRUE(failure == FailureKind::kSoftFailure ||
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 2e5f46c..cbc3ff8 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -490,8 +490,13 @@
}
// Read the current action without looking at the chain, it should be the expected action.
+#if defined(__BIONIC__)
+ struct sigaction64 current_action;
+ linked_sigaction64(signal, nullptr, &current_action);
+#else
struct sigaction current_action;
linked_sigaction(signal, nullptr, &current_action);
+#endif
// If the sigactions don't match then we put the current action on the chain and make ourself as
// the main action.
diff --git a/sigchainlib/sigchain_test.cc b/sigchainlib/sigchain_test.cc
index 9584ded..bb99787 100644
--- a/sigchainlib/sigchain_test.cc
+++ b/sigchainlib/sigchain_test.cc
@@ -26,7 +26,8 @@
* SUCH DAMAGE.
*/
-
+#include <dlfcn.h>
+#include <pthread.h>
#include <signal.h>
#include <sys/syscall.h>
@@ -37,7 +38,7 @@
#include "sigchain.h"
#if !defined(__BIONIC__)
-typedef sigset_t sigset64_t;
+using sigset64_t = sigset_t;
static int sigemptyset64(sigset64_t* set) {
return sigemptyset(set);
@@ -63,10 +64,25 @@
}
art::SigchainAction action = {
- .sc_sigaction = [](int, siginfo_t*, void*) { return true; },
+ .sc_sigaction = [](int, siginfo_t* info, void*) -> bool {
+ return info->si_value.sival_ptr;
+ },
.sc_mask = {},
.sc_flags = 0,
};
+
+ protected:
+ void RaiseHandled() {
+ sigval_t value;
+ value.sival_ptr = &value;
+ pthread_sigqueue(pthread_self(), SIGSEGV, value);
+ }
+
+ void RaiseUnhandled() {
+ sigval_t value;
+ value.sival_ptr = nullptr;
+ pthread_sigqueue(pthread_self(), SIGSEGV, value);
+ }
};
@@ -185,3 +201,40 @@
}
#endif
+
+// Make sure that we properly put ourselves back in front if we get circumvented.
+TEST_F(SigchainTest, EnsureFrontOfChain) {
+#if defined(__BIONIC__)
+ constexpr char kLibcSoName[] = "libc.so";
+#elif defined(__GNU_LIBRARY__) && __GNU_LIBRARY__ == 6
+ constexpr char kLibcSoName[] = "libc.so.6";
+#else
+ #error Unknown libc
+#endif
+ void* libc = dlopen(kLibcSoName, RTLD_LAZY | RTLD_NOLOAD);
+ ASSERT_TRUE(libc);
+
+ static sig_atomic_t called = 0;
+ struct sigaction action = {};
+ action.sa_flags = SA_SIGINFO;
+ action.sa_sigaction = [](int, siginfo_t*, void*) { called = 1; };
+
+ ASSERT_EQ(0, sigaction(SIGSEGV, &action, nullptr));
+
+ // Try before EnsureFrontOfChain.
+ RaiseHandled();
+ ASSERT_EQ(0, called);
+
+ RaiseUnhandled();
+ ASSERT_EQ(1, called);
+ called = 0;
+
+ // ...and after.
+ art::EnsureFrontOfChain(SIGSEGV);
+ ASSERT_EQ(0, called);
+ called = 0;
+
+ RaiseUnhandled();
+ ASSERT_EQ(1, called);
+ called = 0;
+}
diff --git a/simulator/code_simulator_container.cc b/simulator/code_simulator_container.cc
index 3206bc7..dc553df 100644
--- a/simulator/code_simulator_container.cc
+++ b/simulator/code_simulator_container.cc
@@ -34,13 +34,13 @@
if (libart_simulator_handle_ == nullptr) {
VLOG(simulator) << "Could not load " << libart_simulator_so_name << ": " << dlerror();
} else {
- typedef CodeSimulator* (*create_code_simulator_ptr_)(InstructionSet target_isa);
- create_code_simulator_ptr_ create_code_simulator_ =
- reinterpret_cast<create_code_simulator_ptr_>(
+ using CreateCodeSimulatorPtr = CodeSimulator*(*)(InstructionSet);
+ CreateCodeSimulatorPtr create_code_simulator =
+ reinterpret_cast<CreateCodeSimulatorPtr>(
dlsym(libart_simulator_handle_, "CreateCodeSimulator"));
- DCHECK(create_code_simulator_ != nullptr) << "Fail to find symbol of CreateCodeSimulator: "
+ DCHECK(create_code_simulator != nullptr) << "Fail to find symbol of CreateCodeSimulator: "
<< dlerror();
- simulator_ = create_code_simulator_(target_isa);
+ simulator_ = create_code_simulator(target_isa);
}
}
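
The simulator hunk only converts a typedef into a using-alias, but the underlying pattern (dlsym a factory symbol, cast it to a function-pointer alias, call it) is worth seeing in isolation. A minimal sketch with a placeholder int parameter in place of InstructionSet; only the symbol name CreateCodeSimulator comes from the code above:

  #include <dlfcn.h>
  #include <cstdio>

  struct CodeSimulator;  // opaque to the caller

  using CreateCodeSimulatorPtr = CodeSimulator* (*)(int isa);

  CodeSimulator* LoadSimulator(const char* so_name, int isa) {
    void* handle = dlopen(so_name, RTLD_NOW);
    if (handle == nullptr) {
      std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
      return nullptr;
    }
    // Casting the void* from dlsym to a function pointer is the conventional
    // idiom, the same one used (via reinterpret_cast) in the hunk above.
    CreateCodeSimulatorPtr create =
        reinterpret_cast<CreateCodeSimulatorPtr>(dlsym(handle, "CreateCodeSimulator"));
    if (create == nullptr) {
      std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
      return nullptr;
    }
    return create(isa);
  }
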
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 33a8f5b..540e6ce 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -62,7 +62,7 @@
int attach_result = jvm->AttachCurrentThread(&env, &args);
CHECK_EQ(attach_result, 0);
- typedef void (*Fn)(JNIEnv*);
+ using Fn = void(*)(JNIEnv*);
Fn fn = reinterpret_cast<Fn>(arg);
fn(env);
@@ -704,7 +704,7 @@
}
private:
- void TestCalls(const char* declaring_class, std::vector<const char*> methods) {
+ void TestCalls(const char* declaring_class, const std::vector<const char*>& methods) {
jmethodID new_method = env_->GetMethodID(concrete_class_, "<init>", "()V");
jobject obj = env_->NewObject(concrete_class_, new_method);
CHECK(!env_->ExceptionCheck());
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 1ce20e2..4c344a3 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -37,7 +37,7 @@
explicit ReferenceMap2Visitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build
index eeecbfc..3de541c 100644
--- a/test/004-StackWalk/build
+++ b/test/004-StackWalk/build
@@ -18,10 +18,8 @@
set -e
# This test depends on the exact format of the DEX file. Since dx is deprecated,
-# the classes.dex file is packaged as a test input. It was created with:
-#
-# $ javac -g -Xlint:-options -source 1.7 -target 1.7 -d classes src/Main.java
-# $ dx --debug --dex --output=classes.dex classes
+# the classes.dex file is packaged as a test input. See src/Main.java
+# for how it was created.
# Wrapper function for javac which for this test does nothing as the
# test uses a pre-built DEX file.
diff --git a/test/004-StackWalk/classes.dex b/test/004-StackWalk/classes.dex
index ad45296..61a7277 100644
--- a/test/004-StackWalk/classes.dex
+++ b/test/004-StackWalk/classes.dex
Binary files differ
diff --git a/test/004-StackWalk/src/Main.java b/test/004-StackWalk/src/Main.java
index 072b1d0..2a098f7 100644
--- a/test/004-StackWalk/src/Main.java
+++ b/test/004-StackWalk/src/Main.java
@@ -1,19 +1,36 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This test depends on the exact format of the DEX file. Since dx is deprecated,
+// the classes.dex file is packaged as a test input. It was created with:
+//
+// $ javac -g -Xlint:-options -source 1.7 -target 1.7 -d classes src/Main.java
+// $ dx --debug --dex --output=classes.dex classes
+
public class Main {
public Main() {
}
- boolean doThrow = false;
-
int $noinline$f() throws Exception {
- g(1);
- g(2);
-
- // This currently defeats inlining of `f`.
- if (doThrow) { throw new Error(); }
+ $noinline$g(1);
+ $noinline$g(2);
return 0;
}
- void g(int num_calls) {
+ void $noinline$g(int num_calls) {
if (num_calls == 1) {
System.out.println("1st call");
} else if (num_calls == 2) {
@@ -81,11 +98,14 @@
s4 = s18 = s19;
s += s4;
s += s18;
- stackmap(0);
- return s;
+ // Add a branch to workaround ART's large methods without branches heuristic.
+ if (testStackWalk(0) != 0) {
+ return s;
+ }
+ return s18;
}
- native int stackmap(int x);
+ native int testStackWalk(int x);
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 89e2e66..81c27ec 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -20,7 +20,7 @@
namespace art {
-#define CHECK_REGS(...) do { \
+#define CHECK_REGS_ARE_REFERENCES(...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
CheckReferences(t, t_size, GetNativePcOffset()); \
@@ -33,7 +33,7 @@
explicit TestReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
@@ -43,40 +43,50 @@
// Given the method name and the number of times the method has been called,
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
- if (m_name == "f") {
+ if (m_name == "$noinline$f") {
if (gJava_StackWalk_refmap_calls == 1) {
CHECK_EQ(1U, GetDexPc());
- CHECK_REGS(4);
+ CHECK_REGS_ARE_REFERENCES(1);
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
CHECK_EQ(5U, GetDexPc());
- CHECK_REGS(4);
+ CHECK_REGS_ARE_REFERENCES(1);
}
- } else if (m_name == "g") {
+ found_f_ = true;
+ } else if (m_name == "$noinline$g") {
if (gJava_StackWalk_refmap_calls == 1) {
CHECK_EQ(0xcU, GetDexPc());
- CHECK_REGS(0, 2); // Note that v1 is not in the minimal root set
+ CHECK_REGS_ARE_REFERENCES(0, 2); // Note that v1 is not in the minimal root set
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
CHECK_EQ(0xcU, GetDexPc());
- CHECK_REGS(0, 2);
+ CHECK_REGS_ARE_REFERENCES(0, 2);
}
+ found_g_ = true;
} else if (m_name == "shlemiel") {
if (gJava_StackWalk_refmap_calls == 1) {
CHECK_EQ(0x380U, GetDexPc());
- CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
+ CHECK_REGS_ARE_REFERENCES(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
} else {
CHECK_EQ(gJava_StackWalk_refmap_calls, 2);
CHECK_EQ(0x380U, GetDexPc());
- CHECK_REGS(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
+ CHECK_REGS_ARE_REFERENCES(2, 4, 5, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 21, 25);
}
+ found_shlemiel_ = true;
}
return true;
}
+
+ ~TestReferenceMapVisitor() {
+ }
+
+ bool found_f_ = false;
+ bool found_g_ = false;
+ bool found_shlemiel_ = false;
};
-extern "C" JNIEXPORT jint JNICALL Java_Main_stackmap(JNIEnv*, jobject, jint count) {
+extern "C" JNIEXPORT jint JNICALL Java_Main_testStackWalk(JNIEnv*, jobject, jint count) {
ScopedObjectAccess soa(Thread::Current());
CHECK_EQ(count, 0);
gJava_StackWalk_refmap_calls++;
@@ -84,17 +94,9 @@
// Visitor
TestReferenceMapVisitor mapper(soa.Self());
mapper.WalkStack();
-
- return count + 1;
-}
-
-extern "C" JNIEXPORT jint JNICALL Java_Main_refmap2(JNIEnv*, jobject, jint count) {
- ScopedObjectAccess soa(Thread::Current());
- gJava_StackWalk_refmap_calls++;
-
- // Visitor
- TestReferenceMapVisitor mapper(soa.Self());
- mapper.WalkStack();
+ CHECK(mapper.found_f_);
+ CHECK(mapper.found_g_);
+ CHECK(mapper.found_shlemiel_);
return count + 1;
}
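
Aside: the renamed CHECK_REGS_ARE_REFERENCES macro above packs its variadic arguments into a temporary int array and forwards the array plus its element count to CheckReferences(), so each call site can simply list the dex registers expected to hold references at that point. A minimal self-contained sketch of the same macro shape follows; the CheckReferences body here is a printing stand-in, not ART's implementation.

#include <cstdio>

// Stand-in for CheckReferenceMapVisitor::CheckReferences(); only the shape of
// the call matters for this sketch.
static void CheckReferences(const int* regs, int count, int pc_offset) {
  std::printf("pc offset %d: expecting %d reference register(s)\n", pc_offset, count);
  for (int i = 0; i < count; ++i) {
    std::printf("  vreg %d\n", regs[i]);
  }
}

// Same pattern as CHECK_REGS_ARE_REFERENCES: build an array from __VA_ARGS__,
// derive its length with sizeof, and forward both inside a do/while(false).
#define CHECK_REGS_ARE_REFERENCES(...) do {      \
    int t[] = {__VA_ARGS__};                     \
    int t_size = sizeof(t) / sizeof(*t);         \
    CheckReferences(t, t_size, /*pc_offset=*/0); \
  } while (false)

int main() {
  CHECK_REGS_ARE_REFERENCES(0, 2);  // e.g. the $noinline$g case checked above
  return 0;
}
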
diff --git a/test/004-checker-UnsafeTest18/src/Main.java b/test/004-checker-UnsafeTest18/src/Main.java
index 282f9ce..927d0da 100644
--- a/test/004-checker-UnsafeTest18/src/Main.java
+++ b/test/004-checker-UnsafeTest18/src/Main.java
@@ -47,21 +47,21 @@
// Setters.
//
- /// CHECK-START: int Main.set32(java.lang.Object, long, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.set32(java.lang.Object, long, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetInt
/// CHECK-DAG: Return [<<Result>>]
private static int set32(Object o, long offset, int newValue) {
return unsafe.getAndSetInt(o, offset, newValue);
}
- /// CHECK-START: long Main.set64(java.lang.Object, long, long) intrinsics_recognition (after)
+ /// CHECK-START: long Main.set64(java.lang.Object, long, long) builder (after)
/// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetLong
/// CHECK-DAG: Return [<<Result>>]
private static long set64(Object o, long offset, long newValue) {
return unsafe.getAndSetLong(o, offset, newValue);
}
- /// CHECK-START: java.lang.Object Main.setObj(java.lang.Object, long, java.lang.Object) intrinsics_recognition (after)
+ /// CHECK-START: java.lang.Object Main.setObj(java.lang.Object, long, java.lang.Object) builder (after)
/// CHECK-DAG: <<Result:l\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetObject
/// CHECK-DAG: Return [<<Result>>]
private static Object setObj(Object o, long offset, Object newValue) {
@@ -72,14 +72,14 @@
// Adders.
//
- /// CHECK-START: int Main.add32(java.lang.Object, long, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.add32(java.lang.Object, long, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddInt
/// CHECK-DAG: Return [<<Result>>]
private static int add32(Object o, long offset, int delta) {
return unsafe.getAndAddInt(o, offset, delta);
}
- /// CHECK-START: long Main.add64(java.lang.Object, long, long) intrinsics_recognition (after)
+ /// CHECK-START: long Main.add64(java.lang.Object, long, long) builder (after)
/// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddLong
/// CHECK-DAG: Return [<<Result>>]
private static long add64(Object o, long offset, long delta) {
@@ -90,7 +90,7 @@
// Fences (native).
//
- /// CHECK-START: void Main.load() intrinsics_recognition (after)
+ /// CHECK-START: void Main.load() builder (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeLoadFence
//
/// CHECK-START: void Main.load() instruction_simplifier (after)
@@ -102,7 +102,7 @@
unsafe.loadFence();
}
- /// CHECK-START: void Main.store() intrinsics_recognition (after)
+ /// CHECK-START: void Main.store() builder (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeStoreFence
//
/// CHECK-START: void Main.store() instruction_simplifier (after)
@@ -114,7 +114,7 @@
unsafe.storeFence();
}
- /// CHECK-START: void Main.full() intrinsics_recognition (after)
+ /// CHECK-START: void Main.full() builder (after)
/// CHECK-DAG: InvokeVirtual intrinsic:UnsafeFullFence
//
/// CHECK-START: void Main.full() instruction_simplifier (after)
diff --git a/test/089-many-methods/check b/test/089-many-methods/check
index 1f71e8e..e09a291 100755
--- a/test/089-many-methods/check
+++ b/test/089-many-methods/check
@@ -14,5 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-grep Error "$2" > "$2.tmp"
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
+EXPECTED_ERROR="Cannot fit requested classes in a single dex"
+if ! grep -q "$EXPECTED_ERROR" "$2"; then
+ exit 1
+else
+ exit 0
+fi
diff --git a/test/089-many-methods/expected.txt b/test/089-many-methods/expected.txt
index bb6ba3c..b75bde4 100644
--- a/test/089-many-methods/expected.txt
+++ b/test/089-many-methods/expected.txt
@@ -1 +1 @@
-Error: Cannot fit requested classes in a single dex file (# fields: 131000 > 65536)
+See the 'check' script for the expectation!
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index a74f763..cc7e806 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -45,7 +45,7 @@
static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
JNIEnv* env = nullptr;
- typedef jint (*FnPtr_t)(JavaVM*, void*);
+ using FnPtr_t = jint(*)(JavaVM*, void*);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("JNI_OnLoad")->fnPtr);
vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6);
@@ -91,9 +91,8 @@
return fnPtr(vm, reserved);
}
-static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env,
- jclass klass) {
- typedef void (*FnPtr_t)(JNIEnv*, jclass);
+static void trampoline_Java_Main_testFindClassOnAttachedNativeThread(JNIEnv* env, jclass klass) {
+ using FnPtr_t = void(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testFindClassOnAttachedNativeThread")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -102,7 +101,7 @@
static void trampoline_Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv* env,
jclass klass) {
- typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ using FnPtr_t = void(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testFindFieldOnAttachedNativeThreadNative")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -111,7 +110,7 @@
static void trampoline_Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
jclass klass) {
- typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ using FnPtr_t = void(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testCallStaticVoidMethodOnSubClassNative")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -119,7 +118,7 @@
}
static jobject trampoline_Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass klass) {
- typedef jobject (*FnPtr_t)(JNIEnv*, jclass);
+ using FnPtr_t = jobject(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testGetMirandaMethodNative")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -127,7 +126,7 @@
}
static void trampoline_Java_Main_testNewStringObject(JNIEnv* env, jclass klass) {
- typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ using FnPtr_t = void(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testNewStringObject")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -135,7 +134,7 @@
}
static void trampoline_Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass klass) {
- typedef void (*FnPtr_t)(JNIEnv*, jclass);
+ using FnPtr_t = void(*)(JNIEnv*, jclass);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>
(find_native_bridge_method("testZeroLengthByteBuffers")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -145,8 +144,8 @@
static jbyte trampoline_Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
- typedef jbyte (*FnPtr_t)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte,
- jbyte, jbyte, jbyte, jbyte, jbyte);
+ using FnPtr_t = jbyte(*)(JNIEnv*, jclass, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte, jbyte,
+ jbyte, jbyte);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("byteMethod")->fnPtr);
printf("%s called!\n", __FUNCTION__);
return fnPtr(env, klass, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10);
@@ -155,8 +154,8 @@
static jshort trampoline_Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
- typedef jshort (*FnPtr_t)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort,
- jshort, jshort, jshort, jshort, jshort);
+ using FnPtr_t = jshort(*)(JNIEnv*, jclass, jshort, jshort, jshort, jshort, jshort, jshort, jshort,
+ jshort, jshort, jshort);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("shortMethod")->fnPtr);
printf("%s called!\n", __FUNCTION__);
return fnPtr(env, klass, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10);
@@ -166,7 +165,7 @@
jboolean b2, jboolean b3, jboolean b4,
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
- typedef jboolean (*FnPtr_t)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
+ using FnPtr_t = jboolean(*)(JNIEnv*, jclass, jboolean, jboolean, jboolean, jboolean, jboolean,
jboolean, jboolean, jboolean, jboolean, jboolean);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("booleanMethod")->fnPtr);
printf("%s called!\n", __FUNCTION__);
@@ -176,8 +175,8 @@
static jchar trampoline_Java_Main_charMethod(JNIEnv* env, jclass klass, jchar c1, jchar c2,
jchar c3, jchar c4, jchar c5, jchar c6,
jchar c7, jchar c8, jchar c9, jchar c10) {
- typedef jchar (*FnPtr_t)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar,
- jchar, jchar, jchar, jchar, jchar);
+ using FnPtr_t = jchar(*)(JNIEnv*, jclass, jchar, jchar, jchar, jchar, jchar, jchar, jchar, jchar,
+ jchar, jchar);
FnPtr_t fnPtr = reinterpret_cast<FnPtr_t>(find_native_bridge_method("charMethod")->fnPtr);
printf("%s called!\n", __FUNCTION__);
return fnPtr(env, klass, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10);
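
Aside: the repeated typedef-to-using conversions in this file (and in the JVMTI tests below) are purely syntactic. An alias-declaration names the same function-pointer type but keeps the alias name on the left, which reads more easily for pointer-to-function types. A small sketch with generic types; the JNI-specific signatures above follow the same pattern.

#include <cassert>

// Old style: the alias name is buried inside the declarator.
typedef int (*OldStyleFn)(int, int);

// New style: alias-declaration; identical type, name first.
using NewStyleFn = int (*)(int, int);

static int Add(int a, int b) { return a + b; }

int main() {
  OldStyleFn f = Add;
  NewStyleFn g = Add;
  assert(f(1, 2) == g(1, 2));  // Both aliases denote the same type.
  return 0;
}
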
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
deleted file mode 100644
index 7a24e31..0000000
--- a/test/117-nopatchoat/expected.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-JNI_OnLoad called
-Has oat is true, has executable oat is expected.
-This is a function call
diff --git a/test/117-nopatchoat/info.txt b/test/117-nopatchoat/info.txt
deleted file mode 100644
index aa9f57c..0000000
--- a/test/117-nopatchoat/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Test that disables patchoat'ing the application.
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
deleted file mode 100644
index a8a895a..0000000
--- a/test/117-nopatchoat/nopatchoat.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "class_linker.h"
-#include "dex/dex_file-inl.h"
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "mirror/class-inl.h"
-#include "oat_file.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-
-namespace art {
-
-class NoPatchoatTest {
- public:
- static const OatDexFile* getOatDexFile(jclass cls) {
- ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
- const DexFile& dex_file = klass->GetDexFile();
- return dex_file.GetOatDexFile();
- }
-
- static bool isRelocationDeltaZero() {
- std::vector<gc::space::ImageSpace*> spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- return !spaces.empty() && spaces[0]->GetImageHeader().GetPatchDelta() == 0;
- }
-
- static bool hasExecutableOat(jclass cls) {
- const OatDexFile* oat_dex_file = getOatDexFile(cls);
-
- return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
- }
-
- static bool needsRelocation(jclass cls) {
- const OatDexFile* oat_dex_file = getOatDexFile(cls);
-
- if (oat_dex_file == nullptr) {
- return false;
- }
-
- const OatFile* oat_file = oat_dex_file->GetOatFile();
- return !oat_file->IsPic()
- && CompilerFilter::IsAotCompilationEnabled(oat_file->GetCompilerFilter());
- }
-};
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isRelocationDeltaZero(JNIEnv*, jclass) {
- return NoPatchoatTest::isRelocationDeltaZero();
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
- return NoPatchoatTest::hasExecutableOat(cls);
-}
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_needsRelocation(JNIEnv*, jclass cls) {
- return NoPatchoatTest::needsRelocation(cls);
-}
-
-} // namespace art
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
deleted file mode 100755
index 4c33f7a..0000000
--- a/test/117-nopatchoat/run
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ensure flags includes prebuild and relocate. It doesn't make sense unless we
-# have a oat file we want to relocate.
-flags="$@"
-
-# This test is supposed to test with oat files. Make sure that the no-prebuild flag isn't set,
-# or complain.
-# Note: prebuild is the default.
-if [[ "${flags}" == *--no-prebuild* ]] ; then
- echo "Test 117-nopatchoat is not intended to run in no-prebuild mode."
- exit 1
-fi
-
-# This test is supposed to test relocation. Make sure that the no-relocate flag isn't set,
-# or complain.
-# Note: relocate is the default.
-if [[ "${flags}" == *--no-relocate* ]] ; then
- echo "Test 117-nopatchoat is not intended to run in no-relocate mode."
- exit 1
-fi
-
-${RUN} ${flags}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
deleted file mode 100644
index ef47ab9..0000000
--- a/test/117-nopatchoat/src/Main.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
-
- // With a relocationDelta of 0, the runtime has no way to determine if the oat file in
- // ANDROID_DATA has been relocated, since a non-relocated oat file always has a 0 delta.
- // Hitting this condition should be rare and ideally we would prevent it from happening but
- // there is no way to do so without major changes to the run-test framework.
- boolean executable_correct = (needsRelocation() ?
- hasExecutableOat() == isRelocationDeltaZero() :
- hasExecutableOat() == true);
-
- System.out.println(
- "Has oat is " + hasOatFile() + ", has executable oat is " + (
- executable_correct ? "expected" : "not expected") + ".");
-
- System.out.println(functionCall());
- }
-
- public static String functionCall() {
- String arr[] = {"This", "is", "a", "function", "call"};
- String ret = "";
- for (int i = 0; i < arr.length; i++) {
- ret = ret + arr[i] + " ";
- }
- return ret.substring(0, ret.length() - 1);
- }
-
- private native static boolean needsRelocation();
-
- private native static boolean hasOatFile();
-
- private native static boolean hasExecutableOat();
-
- private native static boolean isRelocationDeltaZero();
-}
diff --git a/test/119-noimage-patchoat/check b/test/119-noimage-patchoat/check
deleted file mode 100755
index d124ce8..0000000
--- a/test/119-noimage-patchoat/check
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Strip the process pids and line numbers from exact error messages.
-sed -e '/^dalvikvm\(\|32\|64\) E.*\] /d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/119-noimage-patchoat/expected.txt b/test/119-noimage-patchoat/expected.txt
deleted file mode 100644
index 9b9db58..0000000
--- a/test/119-noimage-patchoat/expected.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false
-JNI_OnLoad called
-Has image is false, is image dex2oat enabled is false.
-Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false -Xno-dex-file-fallback
-Failed to initialize runtime (check log for details)
-Run -Ximage-dex2oat
-JNI_OnLoad called
-Has image is true, is image dex2oat enabled is true.
-Run default
-JNI_OnLoad called
-Has image is true, is image dex2oat enabled is true.
diff --git a/test/119-noimage-patchoat/info.txt b/test/119-noimage-patchoat/info.txt
deleted file mode 100644
index 6b85368..0000000
--- a/test/119-noimage-patchoat/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Test that disables patchoat'ing the image.
diff --git a/test/119-noimage-patchoat/run b/test/119-noimage-patchoat/run
deleted file mode 100644
index 497dc4a..0000000
--- a/test/119-noimage-patchoat/run
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-flags="$@"
-
-# Force relocation otherwise we will just use the already created core.oat/art pair.
-# Note: relocate is the default.
-if [[ "${flags}" == *--no-relocate* ]] ; then
- echo "Test 119-noimage-patchoat is not intended to run in no-relocate mode."
- exit 1
-fi
-
-if [[ $@ == *--host* ]]; then
- false_bin="/bin/false"
-else
- false_bin="/system/bin/false"
-fi
-
-# Make sure we can run without an image file.
-echo "Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false"
-${RUN} ${flags} ${BPATH} --runtime-option -Xnoimage-dex2oat \
- --runtime-option -Xpatchoat:${false_bin}
-return_status1=$?
-
-# Make sure we cannot run without an image file without fallback.
-echo "Run -Xnoimage-dex2oat -Xpatchoat:/system/bin/false -Xno-dex-file-fallback"
-${RUN} ${flags} ${BPATH} --runtime-option -Xnoimage-dex2oat \
- --runtime-option -Xpatchoat:${false_bin} --runtime-option -Xno-dex-file-fallback
-# This second run is expected to fail: invert the return status of the previous command.
-return_status2=$((! $?))
-
-# Make sure we can run with the image file.
-echo "Run -Ximage-dex2oat"
-${RUN} ${flags} ${BPATH} --runtime-option -Ximage-dex2oat
-return_status3=$?
-
-# Make sure we can run with the default settings.
-echo "Run default"
-${RUN} ${flags} ${BPATH}
-return_status4=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2) && (exit $return_status3) && (exit $return_status4)
diff --git a/test/119-noimage-patchoat/src/Main.java b/test/119-noimage-patchoat/src/Main.java
deleted file mode 100644
index 6a70f58..0000000
--- a/test/119-noimage-patchoat/src/Main.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
- boolean hasImage = hasImage();
- System.out.println(
- "Has image is " + hasImage + ", is image dex2oat enabled is "
- + isImageDex2OatEnabled() + ".");
-
- if (hasImage && !isImageDex2OatEnabled()) {
- throw new Error("Image with dex2oat disabled runs with an oat file");
- } else if (!hasImage && isImageDex2OatEnabled()) {
- throw new Error("Image with dex2oat enabled runs without an oat file");
- }
- }
-
- private native static boolean hasImage();
-
- private native static boolean isImageDex2OatEnabled();
-}
diff --git a/test/1900-track-alloc/alloc.cc b/test/1900-track-alloc/alloc.cc
index db5617c..f209611 100644
--- a/test/1900-track-alloc/alloc.cc
+++ b/test/1900-track-alloc/alloc.cc
@@ -24,7 +24,7 @@
namespace art {
namespace Test1900TrackAlloc {
-typedef jvmtiError (*GetGlobalState)(jvmtiEnv* env, jlong* allocated);
+using GetGlobalState = jvmtiError(*)(jvmtiEnv* env, jlong* allocated);
struct AllocTrackingData {
GetGlobalState get_global_state;
diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java
index 97f0973..cc8a4c4 100644
--- a/test/1935-get-set-current-frame-jit/src/Main.java
+++ b/test/1935-get-set-current-frame-jit/src/Main.java
@@ -58,7 +58,8 @@
}
public void run() {
int TARGET = 42;
- if (hasJit() && expectOsr && !Main.isInterpreted()) {
+ boolean normalJit = hasJit() && getJitThreshold() != 0; // Excluding JIT-at-first-use.
+ if (normalJit && expectOsr && !Main.isInterpreted()) {
System.out.println("Unexpectedly in jit code prior to restarting the JIT!");
}
startJit();
@@ -72,10 +73,10 @@
do {
// Don't actually do anything here.
inBusyLoop = true;
- } while (hasJit() && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
+ } while (normalJit && !Main.isInOsrCode("run") && osrDeadline.compareTo(Instant.now()) > 0);
// We shouldn't be doing OSR since we are using JVMTI and the set prevents OSR.
// Set local will also push us to interpreter but the get local may remain in compiled code.
- if (hasJit()) {
+ if (normalJit) {
boolean inOsr = Main.isInOsrCode("run");
if (expectOsr && !inOsr) {
throw new Error(
@@ -184,4 +185,5 @@
public static native boolean stopJit();
public static native boolean startJit();
public static native boolean hasJit();
+ public static native int getJitThreshold();
}
diff --git a/test/1940-ddms-ext/ddm_ext.cc b/test/1940-ddms-ext/ddm_ext.cc
index cc29df9..452187b 100644
--- a/test/1940-ddms-ext/ddm_ext.cc
+++ b/test/1940-ddms-ext/ddm_ext.cc
@@ -25,7 +25,7 @@
namespace art {
namespace Test1940DdmExt {
-typedef jvmtiError (*DdmHandleChunk)(jvmtiEnv* env,
+using DdmHandleChunk = jvmtiError(*)(jvmtiEnv* env,
jint type_in,
jint len_in,
const jbyte* data_in,
diff --git a/test/1946-list-descriptors/descriptors.cc b/test/1946-list-descriptors/descriptors.cc
index 01b306d..07fee61 100644
--- a/test/1946-list-descriptors/descriptors.cc
+++ b/test/1946-list-descriptors/descriptors.cc
@@ -24,7 +24,7 @@
namespace art {
namespace Test1946Descriptors {
-typedef jvmtiError (*GetDescriptorList)(jvmtiEnv* env, jobject loader, jint* cnt, char*** descs);
+using GetDescriptorList = jvmtiError(*)(jvmtiEnv* env, jobject loader, jint* cnt, char*** descs);
struct DescriptorData {
GetDescriptorList get_descriptor_list;
diff --git a/test/1951-monitor-enter-no-suspend/raw_monitor.cc b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
index 0425e35..efd02b6 100644
--- a/test/1951-monitor-enter-no-suspend/raw_monitor.cc
+++ b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
@@ -26,7 +26,7 @@
namespace art {
namespace Test1951MonitorEnterNoSuspend {
-typedef jvmtiError (*RawMonitorEnterNoSuspend)(jvmtiEnv* env, jrawMonitorID mon);
+using RawMonitorEnterNoSuspend = jvmtiError(*)(jvmtiEnv* env, jrawMonitorID mon);
template <typename T>
static void Dealloc(T* t) {
diff --git a/test/1953-pop-frame/check b/test/1953-pop-frame/check
new file mode 100755
index 0000000..d552272
--- /dev/null
+++ b/test/1953-pop-frame/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART does
+# not have. See b/116003018. Some configurations cannot handle the class load
+# events in quite the right way, so they are disabled there too.
+./default-check "$@" || \
+ (patch -p0 expected.txt < class-loading-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1953-pop-frame/class-loading-expected.patch b/test/1953-pop-frame/class-loading-expected.patch
new file mode 100644
index 0000000..2edef15
--- /dev/null
+++ b/test/1953-pop-frame/class-loading-expected.patch
@@ -0,0 +1,21 @@
+74a75,94
+> Test stopped during a ClassLoad event.
+> Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+> Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> art.Test1953.popFrame(Native Method)
+> art.Test1953.runTestOn(Test1953.java)
+> art.Test1953.runTestOn(Test1953.java)
+> art.Test1953.runTests(Test1953.java)
+> <Additional frames hidden>
+> TC0.foo == 1
+> result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+> Test stopped during a ClassPrepare event.
+> Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+> Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> art.Test1953.popFrame(Native Method)
+> art.Test1953.runTestOn(Test1953.java)
+> art.Test1953.runTestOn(Test1953.java)
+> art.Test1953.runTests(Test1953.java)
+> <Additional frames hidden>
+> TC1.foo == 2
+> result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1953-pop-frame/expected.txt b/test/1953-pop-frame/expected.txt
new file mode 100644
index 0000000..906703d
--- /dev/null
+++ b/test/1953-pop-frame/expected.txt
@@ -0,0 +1,98 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1953-pop-frame/info.txt b/test/1953-pop-frame/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1953-pop-frame/info.txt
@@ -0,0 +1,7 @@
+Test basic JVMTI PopFrame functionality.
+
+This test suspends a thread at a wide variety of events (breakpoints, single
+step, field access and modification, method entry and exit, exceptions,
+frame pops, class load/prepare, and native calls), pops the current frame,
+and checks that the popped method re-executes. Frames for or under native
+methods are expected to fail with JVMTI_ERROR_OPAQUE_FRAME.
diff --git a/test/1953-pop-frame/pop_frame.cc b/test/1953-pop-frame/pop_frame.cc
new file mode 100644
index 0000000..1c2d2a1
--- /dev/null
+++ b/test/1953-pop-frame/pop_frame.cc
@@ -0,0 +1,998 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1953PopFrame {
+
+struct TestData {
+ jlocation target_loc;
+ jmethodID target_method;
+ jclass target_klass;
+ jfieldID target_field;
+ jrawMonitorID notify_monitor;
+ jint frame_pop_offset;
+ jmethodID frame_pop_setup_method;
+ std::vector<std::string> interesting_classes;
+ bool hit_location;
+
+ TestData(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jlocation loc,
+ jobject meth,
+ jclass klass,
+ jobject field,
+ jobject setup_meth,
+ jint pop_offset,
+ const std::vector<std::string>&& interesting)
+ : target_loc(loc),
+ target_method(meth != nullptr ? env->FromReflectedMethod(meth) : nullptr),
+ target_klass(reinterpret_cast<jclass>(env->NewGlobalRef(klass))),
+ target_field(field != nullptr ? env->FromReflectedField(field) : nullptr),
+ frame_pop_offset(pop_offset),
+ frame_pop_setup_method(setup_meth != nullptr ? env->FromReflectedMethod(setup_meth)
+ : nullptr),
+ interesting_classes(interesting),
+ hit_location(false) {
+ JvmtiErrorToException(env, jvmti, jvmti->CreateRawMonitor("SuspendStopMonitor",
+ ¬ify_monitor));
+ }
+
+ void PerformSuspend(jvmtiEnv* jvmti, JNIEnv* env) {
+ // Wake up the waiting thread.
+ JvmtiErrorToException(env, jvmti, jvmti->RawMonitorEnter(notify_monitor));
+ hit_location = true;
+ JvmtiErrorToException(env, jvmti, jvmti->RawMonitorNotifyAll(notify_monitor));
+ JvmtiErrorToException(env, jvmti, jvmti->RawMonitorExit(notify_monitor));
+ // Suspend ourself
+ jvmti->SuspendThread(nullptr);
+ }
+};
+
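
Aside: PerformSuspend() above follows a common JVMTI handshake: the event callback signals a raw monitor so the controlling thread knows the interesting location was hit, then suspends the current thread by passing nullptr to SuspendThread. A stripped-down sketch of that handshake, assuming an initialized jvmtiEnv*; error handling is omitted and the parameter names are illustrative.

#include <jvmti.h>

// Signal a waiting controller, then park the current thread. Mirrors the
// notify-then-suspend shape of TestData::PerformSuspend() above.
void NotifyAndSuspendSelf(jvmtiEnv* jvmti, jrawMonitorID monitor, bool* hit_flag) {
  jvmti->RawMonitorEnter(monitor);      // Take the monitor guarding hit_flag.
  *hit_flag = true;                     // Record that the target location was reached.
  jvmti->RawMonitorNotifyAll(monitor);  // Wake the thread waiting for the hit.
  jvmti->RawMonitorExit(monitor);
  jvmti->SuspendThread(nullptr);        // nullptr means "suspend the current thread".
}
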
+void JNICALL cbSingleStep(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID meth,
+ jlocation loc) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (meth != data->target_method || loc != data->target_loc) {
+ return;
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbExceptionCatch(jvmtiEnv *jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method,
+ jlocation location ATTRIBUTE_UNUSED,
+ jobject exception ATTRIBUTE_UNUSED) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (method != data->target_method) {
+ return;
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbException(jvmtiEnv *jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method,
+ jlocation location ATTRIBUTE_UNUSED,
+ jobject exception ATTRIBUTE_UNUSED,
+ jmethodID catch_method ATTRIBUTE_UNUSED,
+ jlocation catch_location ATTRIBUTE_UNUSED) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (method != data->target_method) {
+ return;
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodEntry(jvmtiEnv *jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (method != data->target_method) {
+ return;
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodExit(jvmtiEnv *jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method,
+ jboolean was_popped_by_exception ATTRIBUTE_UNUSED,
+ jvalue return_value ATTRIBUTE_UNUSED) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (method != data->target_method) {
+ return;
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldModification(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method ATTRIBUTE_UNUSED,
+ jlocation location ATTRIBUTE_UNUSED,
+ jclass field_klass ATTRIBUTE_UNUSED,
+ jobject object ATTRIBUTE_UNUSED,
+ jfieldID field,
+ char signature_type ATTRIBUTE_UNUSED,
+ jvalue new_value ATTRIBUTE_UNUSED) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (field != data->target_field) {
+ // TODO What to do here.
+ LOG(FATAL) << "Strange, shouldn't get here!";
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldAccess(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method ATTRIBUTE_UNUSED,
+ jlocation location ATTRIBUTE_UNUSED,
+ jclass field_klass,
+ jobject object ATTRIBUTE_UNUSED,
+ jfieldID field) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (field != data->target_field || !env->IsSameObject(field_klass, data->target_klass)) {
+ // TODO What to do here.
+ LOG(FATAL) << "Strange, shouldn't get here!";
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbBreakpointHit(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method,
+ jlocation loc) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (data->frame_pop_setup_method == method) {
+ CHECK(loc == 0) << "We should have stopped at location 0";
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->NotifyFramePop(thr, data->frame_pop_offset))) {
+ return;
+ }
+ return;
+ }
+ if (method != data->target_method || loc != data->target_loc) {
+ // TODO What to do here.
+ LOG(FATAL) << "Strange, shouldn't get here!";
+ }
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFramePop(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jmethodID method ATTRIBUTE_UNUSED,
+ jboolean was_popped_by_exception ATTRIBUTE_UNUSED) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbClassLoadOrPrepare(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthread thr,
+ jclass klass) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ char* name;
+ if (JvmtiErrorToException(env, jvmti, jvmti->GetClassSignature(klass, &name, nullptr))) {
+ return;
+ }
+ std::string name_str(name);
+ if (JvmtiErrorToException(env,
+ jvmti,
+ jvmti->Deallocate(reinterpret_cast<unsigned char*>(name)))) {
+ return;
+ }
+ if (std::find(data->interesting_classes.cbegin(),
+ data->interesting_classes.cend(),
+ name_str) != data->interesting_classes.cend()) {
+ data->PerformSuspend(jvmti, env);
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupTest(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+ jvmtiCapabilities caps;
+ memset(&caps, 0, sizeof(caps));
+ // Most of these will already be there but might as well be complete.
+ caps.can_pop_frame = 1;
+ caps.can_generate_single_step_events = 1;
+ caps.can_generate_breakpoint_events = 1;
+ caps.can_suspend = 1;
+ caps.can_generate_method_entry_events = 1;
+ caps.can_generate_method_exit_events = 1;
+ caps.can_generate_monitor_events = 1;
+ caps.can_generate_exception_events = 1;
+ caps.can_generate_frame_pop_events = 1;
+ caps.can_generate_field_access_events = 1;
+ caps.can_generate_field_modification_events = 1;
+ caps.can_redefine_classes = 1;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
+ return;
+ }
+ jvmtiEventCallbacks cb;
+ memset(&cb, 0, sizeof(cb));
+ // TODO Add the rest of these.
+ cb.Breakpoint = cbBreakpointHit;
+ cb.SingleStep = cbSingleStep;
+ cb.FieldAccess = cbFieldAccess;
+ cb.FieldModification = cbFieldModification;
+ cb.MethodEntry = cbMethodEntry;
+ cb.MethodExit = cbMethodExit;
+ cb.Exception = cbException;
+ cb.ExceptionCatch = cbExceptionCatch;
+ cb.FramePop = cbFramePop;
+ cb.ClassLoad = cbClassLoadOrPrepare;
+ cb.ClassPrepare = cbClassLoadOrPrepare;
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)));
+}
+
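
Aside: Java_art_Test1953_setupTest above is the standard JVMTI bootstrapping sequence: request every capability the test will need with AddCapabilities, install one jvmtiEventCallbacks table with SetEventCallbacks, and only later turn individual events on, per thread, with SetEventNotificationMode. A reduced sketch of that sequence, assuming a live jvmtiEnv* and caring only about breakpoints; error handling is minimal and the names are illustrative.

#include <cstring>
#include <jvmti.h>

static void JNICALL OnBreakpoint(jvmtiEnv* /*jvmti*/, JNIEnv* /*jni*/, jthread /*thread*/,
                                 jmethodID /*method*/, jlocation /*location*/) {
  // React to the breakpoint here (the real test suspends the thread).
}

// Capability + callback setup; events stay disabled until explicitly enabled.
jvmtiError InstallBreakpointHandler(jvmtiEnv* jvmti) {
  jvmtiCapabilities caps;
  std::memset(&caps, 0, sizeof(caps));
  caps.can_generate_breakpoint_events = 1;  // Only what this sketch needs.
  jvmtiError err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  jvmtiEventCallbacks cb;
  std::memset(&cb, 0, sizeof(cb));
  cb.Breakpoint = OnBreakpoint;
  err = jvmti->SetEventCallbacks(&cb, sizeof(cb));
  if (err != JVMTI_ERROR_NONE) {
    return err;
  }
  // Enable the event globally; the test above enables it per thread instead.
  return jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_BREAKPOINT, nullptr);
}
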
+static bool DeleteTestData(JNIEnv* env, jthread thr, TestData* data) {
+ env->DeleteGlobalRef(data->target_klass);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+ return false;
+ }
+ return JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data)));
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+ jobject meth,
+ jlocation loc,
+ jclass target_klass,
+ jobject field,
+ jobject setup_meth,
+ jint pop_offset,
+ const std::vector<std::string>&& interesting_names) {
+ void* data_ptr;
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->Allocate(sizeof(TestData),
+ reinterpret_cast<uint8_t**>(&data_ptr)))) {
+ return nullptr;
+ }
+ data = new (data_ptr) TestData(jvmti_env,
+ env,
+ loc,
+ meth,
+ target_klass,
+ field,
+ setup_meth,
+ pop_offset,
+ std::move(interesting_names));
+ if (env->ExceptionCheck()) {
+ env->DeleteGlobalRef(data->target_klass);
+ jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data));
+ return nullptr;
+ }
+ return data;
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+ jobject meth,
+ jlocation loc,
+ jclass target_klass,
+ jobject field,
+ jobject setup_meth,
+ jint pop_offset) {
+ std::vector<std::string> empty;
+ return SetupTestData(
+ env, meth, loc, target_klass, field, setup_meth, pop_offset, std::move(empty));
+}
+
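
Aside: SetupTestData above cannot simply `new` a TestData because the storage comes from the JVMTI allocator (Allocate/Deallocate), so it constructs the object in place with placement new. A sketch of the general pattern, using a plain malloc'd buffer so it stands alone; the JVMTI-specific calls are as in the code above.

#include <cstdlib>
#include <new>
#include <string>

struct Payload {
  std::string name;
  int value;
  Payload(const char* n, int v) : name(n), value(v) {}
};

int main() {
  // Stand-in for jvmti->Allocate(): raw bytes from a foreign allocator.
  void* raw = std::malloc(sizeof(Payload));
  // Construct the object in that storage (what SetupTestData does with TestData).
  Payload* p = new (raw) Payload("example", 42);
  // Run the destructor by hand before returning the memory, since the foreign
  // allocator knows nothing about C++ object lifetimes.
  p->~Payload();
  std::free(raw);  // Stand-in for jvmti->Deallocate().
  return 0;
}
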
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendClassEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint event_num,
+ jobjectArray interesting_names,
+ jthread thr) {
+ CHECK(event_num == JVMTI_EVENT_CLASS_LOAD || event_num == JVMTI_EVENT_CLASS_PREPARE);
+ std::vector<std::string> names;
+ jint cnt = env->GetArrayLength(interesting_names);
+ for (jint i = 0; i < cnt; i++) {
+ env->PushLocalFrame(1);
+ jstring name_obj = reinterpret_cast<jstring>(env->GetObjectArrayElement(interesting_names, i));
+ const char* name_chr = env->GetStringUTFChars(name_obj, nullptr);
+ names.push_back(std::string(name_chr));
+ env->ReleaseStringUTFChars(name_obj, name_chr);
+ env->PopLocalFrame(nullptr);
+ }
+ TestData* data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0, std::move(names));
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ static_cast<jvmtiEvent>(event_num),
+ thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendClassEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_CLASS_LOAD,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_CLASS_PREPARE,
+ thr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendSingleStepAt(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject meth,
+ jlocation loc,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_SINGLE_STEP,
+ thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendSingleStepFor(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_SINGLE_STEP,
+ thr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendPopFrameEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jint offset,
+ jobject breakpoint_func,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, nullptr, 0, nullptr, nullptr, breakpoint_func, offset);
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_FRAME_POP,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetBreakpoint(data->frame_pop_setup_method, 0))) {
+ return;
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendPopFrameEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_FRAME_POP,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->ClearBreakpoint(data->frame_pop_setup_method, 0))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendBreakpointFor(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject meth,
+ jlocation loc,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ thr))) {
+ return;
+ }
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetBreakpoint(data->target_method,
+ data->target_loc));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendBreakpointFor(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_BREAKPOINT,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->ClearBreakpoint(data->target_method,
+ data->target_loc))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendExceptionEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject method,
+ jboolean is_catch,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(
+ thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(
+ JVMTI_ENABLE,
+ is_catch ? JVMTI_EVENT_EXCEPTION_CATCH : JVMTI_EVENT_EXCEPTION,
+ thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendExceptionEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_EXCEPTION_CATCH,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_EXCEPTION,
+ thr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupSuspendMethodEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jobject method,
+ jboolean enter,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(
+ thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(
+ JVMTI_ENABLE,
+ enter ? JVMTI_EVENT_METHOD_ENTRY : JVMTI_EVENT_METHOD_EXIT,
+ thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearSuspendMethodEvent(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_METHOD_EXIT,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_METHOD_ENTRY,
+ thr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupFieldSuspendFor(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jclass target_klass,
+ jobject field,
+ jboolean access,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(
+ thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, nullptr, 0, target_klass, field, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(
+ JVMTI_ENABLE,
+ access ? JVMTI_EVENT_FIELD_ACCESS : JVMTI_EVENT_FIELD_MODIFICATION,
+ thr))) {
+ return;
+ }
+ if (access) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldAccessWatch(data->target_klass,
+ data->target_field));
+ } else {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldModificationWatch(data->target_klass,
+ data->target_field));
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearFieldSuspendFor(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_FIELD_ACCESS,
+ thr))) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
+ JVMTI_EVENT_FIELD_MODIFICATION,
+ thr))) {
+ return;
+ }
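+  // Only one of the two watches was actually installed during setup, so one of these clears is
+  // expected to fail; propagate the error only if both fail, otherwise drop the pending exception.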
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->ClearFieldModificationWatch(
+ data->target_klass, data->target_field)) &&
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->ClearFieldAccessWatch(
+ data->target_klass, data->target_field))) {
+ return;
+ } else {
+ env->ExceptionClear();
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_setupWaitForNativeCall(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(
+ thr, reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data == nullptr) << "Data was not cleared!";
+ data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0);
+ if (data == nullptr) {
+ return;
+ }
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, data))) {
+ return;
+ }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_clearWaitForNativeCall(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+ return;
+ }
+ DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_waitForSuspendHit(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(thr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(data->notify_monitor))) {
+ return;
+ }
+ while (!data->hit_location) {
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorWait(data->notify_monitor, -1))) {
+ return;
+ }
+ }
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(data->notify_monitor))) {
+ return;
+ }
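+  // The suspension is requested from the event handler but may not have taken effect yet;
+  // spin until the target thread actually reports the suspended state.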
+ jint state = 0;
+ while (!JvmtiErrorToException(env, jvmti_env, jvmti_env->GetThreadState(thr, &state)) &&
+ (state & JVMTI_THREAD_STATE_SUSPENDED) == 0) { }
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_popFrame(JNIEnv* env,
+ jclass klass ATTRIBUTE_UNUSED,
+ jthread thr) {
+ JvmtiErrorToException(env, jvmti_env, jvmti_env->PopFrame(thr));
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_00024NativeCalledObject_calledFunction(
+ JNIEnv* env, jobject thiz) {
+ env->PushLocalFrame(1);
+ jclass klass = env->GetObjectClass(thiz);
+ jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
+ env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+ env->PopLocalFrame(nullptr);
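+  // As with the Java test objects, suspend via the shared TestData after bumping cnt so the
+  // main thread can attempt a PopFrame (expected to fail for a native frame).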
+ TestData *data;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
+ reinterpret_cast<void**>(&data)))) {
+ return;
+ }
+ CHECK(data != nullptr);
+ data->PerformSuspend(jvmti_env, env);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1953_00024NativeCallerObject_run(
+ JNIEnv* env, jobject thiz) {
+ env->PushLocalFrame(1);
+ jclass klass = env->GetObjectClass(thiz);
+ jfieldID baseCnt = env->GetFieldID(klass, "baseCnt", "I");
+ env->SetIntField(thiz, baseCnt, env->GetIntField(thiz, baseCnt) + 1);
+ jmethodID called = env->GetMethodID(klass, "calledFunction", "()V");
+ env->CallVoidMethod(thiz, called);
+ env->PopLocalFrame(nullptr);
+}
+
+extern "C" JNIEXPORT
+jboolean JNICALL Java_art_Test1953_isClassLoaded(JNIEnv* env, jclass, jstring name) {
+ ScopedUtfChars chr(env, name);
+ if (env->ExceptionCheck()) {
+ return false;
+ }
+ jint cnt = 0;
+ jclass* klasses = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetLoadedClasses(&cnt, &klasses))) {
+ return false;
+ }
+ bool res = false;
+ for (jint i = 0; !res && i < cnt; i++) {
+ char* sig;
+ if (JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->GetClassSignature(klasses[i], &sig, nullptr))) {
+ return false;
+ }
+ res = (strcmp(sig, chr.c_str()) == 0);
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+ }
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+ return res;
+}
+
+} // namespace Test1953PopFrame
+} // namespace art
+
diff --git a/test/1953-pop-frame/run b/test/1953-pop-frame/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1953-pop-frame/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1953-pop-frame/src/Main.java b/test/1953-pop-frame/src/Main.java
new file mode 100644
index 0000000..156076e
--- /dev/null
+++ b/test/1953-pop-frame/src/Main.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1953.run(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS"));
+ }
+}
diff --git a/test/1953-pop-frame/src/art/Breakpoint.java b/test/1953-pop-frame/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
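+  // Maps a bytecode location to a source line by taking the last line-table entry at or
+  // before the given location, or -1 if the table is unavailable.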
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
diff --git a/test/1953-pop-frame/src/art/Redefinition.java b/test/1953-pop-frame/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1953-pop-frame/src/art/StackTrace.java b/test/1953-pop-frame/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1953-pop-frame/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+ public static class StackFrameData {
+ public final Thread thr;
+ public final Executable method;
+ public final long current_location;
+ public final int depth;
+
+ public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+ this.thr = thr;
+ this.method = e;
+ this.current_location = loc;
+ this.depth = depth;
+ }
+ @Override
+ public String toString() {
+ return String.format(
+ "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+ this.thr,
+ this.method,
+ this.current_location,
+ this.depth);
+ }
+ }
+
+ public static native int GetStackDepth(Thread thr);
+
+ private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+ public static StackFrameData[] GetStackTrace(Thread thr) {
+ // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+ // suspended. The spec says that not being suspended is fine but since we want this to be
+ // consistent we will suspend for the RI.
+ boolean suspend_thread =
+ !System.getProperty("java.vm.name").equals("Dalvik") &&
+ !thr.equals(Thread.currentThread()) &&
+ !Suspension.isSuspended(thr);
+ if (suspend_thread) {
+ Suspension.suspend(thr);
+ }
+ StackFrameData[] out = nativeGetStackTrace(thr);
+ if (suspend_thread) {
+ Suspension.resume(thr);
+ }
+ return out;
+ }
+}
+
diff --git a/test/1953-pop-frame/src/art/Suspension.java b/test/1953-pop-frame/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1953-pop-frame/src/art/Test1953.java b/test/1953-pop-frame/src/art/Test1953.java
new file mode 100644
index 0000000..adec776
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Test1953.java
@@ -0,0 +1,976 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Base64;
+import java.util.EnumSet;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+
+public class Test1953 {
+ public final boolean canRunClassLoadTests;
+ public static void doNothing() {}
+
+ public interface TestRunnable extends Runnable {
+ public int getBaseCallCount();
+ public Method getCalledMethod() throws Exception;
+ public default Method getCallingMethod() throws Exception {
+ return this.getClass().getMethod("run");
+ };
+ }
+
+ public static interface TestSuspender {
+ public void setup(Thread thr);
+ public void waitForSuspend(Thread thr);
+ public void cleanup(Thread thr);
+ }
+
+ public static interface ThreadRunnable { public void run(Thread thr); }
+ public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
+ return new TestSuspender() {
+ public void setup(Thread thr) { setup.run(thr); }
+ public void waitForSuspend(Thread thr) { Test1953.waitForSuspendHit(thr); }
+ public void cleanup(Thread thr) { clean.run(thr); }
+ };
+ }
+
+ public void runTestOn(TestRunnable testObj, ThreadRunnable su, ThreadRunnable cl) throws
+ Exception {
+ runTestOn(testObj, makeSuspend(su, cl));
+ }
+
+ private static void SafePrintStackTrace(StackTraceElement st[]) {
+ for (StackTraceElement e : st) {
+ System.out.println("\t" + e.getClassName() + "." + e.getMethodName() + "(" +
+ (e.isNativeMethod() ? "Native Method" : e.getFileName()) + ")");
+ if (e.getClassName().equals("art.Test1953") && e.getMethodName().equals("runTests")) {
+ System.out.println("\t<Additional frames hidden>");
+ break;
+ }
+ }
+ }
+
+ public void runTestOn(TestRunnable testObj, TestSuspender su) throws Exception {
+ System.out.println("Single call with PopFrame on " + testObj + " base-call-count: " +
+ testObj.getBaseCallCount());
+ final CountDownLatch continue_latch = new CountDownLatch(1);
+ final CountDownLatch startup_latch = new CountDownLatch(1);
+ Runnable await = () -> {
+ try {
+ startup_latch.countDown();
+ continue_latch.await();
+ } catch (Exception e) {
+ throw new Error("Failed to await latch", e);
+ }
+ };
+ Thread thr = new Thread(() -> { await.run(); testObj.run(); });
+ thr.start();
+
+ // Wait until the other thread is started.
+ startup_latch.await();
+
+ // Do any final setup.
+ preTest.accept(testObj);
+
+ // Setup suspension method on the thread.
+ su.setup(thr);
+
+ // Let the other thread go.
+ continue_latch.countDown();
+
+ // Wait for the other thread to hit the breakpoint/watchpoint/whatever and suspend itself
+ // (without re-entering java)
+ su.waitForSuspend(thr);
+
+ // Cleanup the breakpoint/watchpoint/etc.
+ su.cleanup(thr);
+
+ try {
+ // Pop the frame.
+ popFrame(thr);
+ } catch (Exception e) {
+ System.out.println("Failed to pop frame due to " + e);
+ SafePrintStackTrace(e.getStackTrace());
+ }
+
+ // Start the other thread going again.
+ Suspension.resume(thr);
+
+ // Wait for the other thread to finish.
+ thr.join();
+
+ // See how many times calledFunction was called.
+ System.out.println("result is " + testObj + " base-call count: " + testObj.getBaseCallCount());
+ }
+
+ public static abstract class AbstractTestObject implements TestRunnable {
+ public int callerCnt;
+
+ public AbstractTestObject() {
+ callerCnt = 0;
+ }
+
+ public int getBaseCallCount() {
+ return callerCnt;
+ }
+
+ public void run() {
+ callerCnt++;
+ // This function should be re-executed by the popFrame.
+ calledFunction();
+ }
+
+ public Method getCalledMethod() throws Exception {
+ return this.getClass().getMethod("calledFunction");
+ }
+
+ public abstract void calledFunction();
+ }
+
+ public static class RedefineTestObject extends AbstractTestObject implements Runnable {
+ public static enum RedefineState { ORIGINAL, REDEFINED, };
+ /* public static class RedefineTestObject extends AbstractTestObject implements Runnable {
+ * public static final byte[] CLASS_BYTES;
+ * public static final byte[] DEX_BYTES;
+ * static {
+ * CLASS_BYTES = null;
+ * DEX_BYTES = null;
+ * }
+ *
+ * public EnumSet<RedefineState> redefine_states;
+ * public RedefineTestObject() {
+ * super();
+ * redefine_states = EnumSet.noneOf(RedefineState.class);
+ * }
+ * public String toString() {
+ * return "RedefineTestObject { states: " + redefine_states.toString()
+ * + " current: REDEFINED }";
+ * }
+ * public void calledFunction() {
+ * redefine_states.add(RedefineState.REDEFINED); // line +0
+ * // We will trigger the redefinition using a breakpoint on the next line.
+ * doNothing(); // line +2
+ * }
+ * }
+ */
+ public static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+ "yv66vgAAADUATQoADQAjBwAkCgAlACYJAAwAJwoAJQAoEgAAACwJAAIALQoAJQAuCgAvADAJAAwA" +
+ "MQkADAAyBwAzBwA0BwA2AQASUmVkZWZpbmVUZXN0T2JqZWN0AQAMSW5uZXJDbGFzc2VzAQANUmVk" +
+ "ZWZpbmVTdGF0ZQEAC0NMQVNTX0JZVEVTAQACW0IBAAlERVhfQllURVMBAA9yZWRlZmluZV9zdGF0" +
+ "ZXMBABNMamF2YS91dGlsL0VudW1TZXQ7AQAJU2lnbmF0dXJlAQBETGphdmEvdXRpbC9FbnVtU2V0" +
+ "PExhcnQvVGVzdDE5NTMkUmVkZWZpbmVUZXN0T2JqZWN0JFJlZGVmaW5lU3RhdGU7PjsBAAY8aW5p" +
+ "dD4BAAMoKVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAIdG9TdHJpbmcBABQoKUxqYXZhL2xh" +
+ "bmcvU3RyaW5nOwEADmNhbGxlZEZ1bmN0aW9uAQAIPGNsaW5pdD4BAApTb3VyY2VGaWxlAQANVGVz" +
+ "dDE5NTMuamF2YQwAGQAaAQAtYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmlu" +
+ "ZVN0YXRlBwA3DAA4ADkMABUAFgwAHQAeAQAQQm9vdHN0cmFwTWV0aG9kcw8GADoIADsMADwAPQwA" +
+ "PgA/DABAAEEHAEIMAEMAGgwAEgATDAAUABMBAB9hcnQvVGVzdDE5NTMkUmVkZWZpbmVUZXN0T2Jq" +
+ "ZWN0AQAfYXJ0L1Rlc3QxOTUzJEFic3RyYWN0VGVzdE9iamVjdAEAEkFic3RyYWN0VGVzdE9iamVj" +
+ "dAEAEmphdmEvbGFuZy9SdW5uYWJsZQEAEWphdmEvdXRpbC9FbnVtU2V0AQAGbm9uZU9mAQAmKExq" +
+ "YXZhL2xhbmcvQ2xhc3M7KUxqYXZhL3V0aWwvRW51bVNldDsKAEQARQEAM1JlZGVmaW5lVGVzdE9i" +
+ "amVjdCB7IHN0YXRlczogASBjdXJyZW50OiBSRURFRklORUQgfQEAF21ha2VDb25jYXRXaXRoQ29u" +
+ "c3RhbnRzAQAmKExqYXZhL2xhbmcvU3RyaW5nOylMamF2YS9sYW5nL1N0cmluZzsBAAlSRURFRklO" +
+ "RUQBAC9MYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmluZVN0YXRlOwEAA2Fk" +
+ "ZAEAFShMamF2YS9sYW5nL09iamVjdDspWgEADGFydC9UZXN0MTk1MwEACWRvTm90aGluZwcARgwA" +
+ "PABJAQAkamF2YS9sYW5nL2ludm9rZS9TdHJpbmdDb25jYXRGYWN0b3J5BwBLAQAGTG9va3VwAQCY" +
+ "KExqYXZhL2xhbmcvaW52b2tlL01ldGhvZEhhbmRsZXMkTG9va3VwO0xqYXZhL2xhbmcvU3RyaW5n" +
+ "O0xqYXZhL2xhbmcvaW52b2tlL01ldGhvZFR5cGU7TGphdmEvbGFuZy9TdHJpbmc7W0xqYXZhL2xh" +
+ "bmcvT2JqZWN0OylMamF2YS9sYW5nL2ludm9rZS9DYWxsU2l0ZTsHAEwBACVqYXZhL2xhbmcvaW52" +
+ "b2tlL01ldGhvZEhhbmRsZXMkTG9va3VwAQAeamF2YS9sYW5nL2ludm9rZS9NZXRob2RIYW5kbGVz" +
+ "ACEADAANAAEADgADABkAEgATAAAAGQAUABMAAAABABUAFgABABcAAAACABgABAABABkAGgABABsA" +
+ "AAAuAAIAAQAAAA4qtwABKhICuAADtQAEsQAAAAEAHAAAAA4AAwAAACEABAAiAA0AIwABAB0AHgAB" +
+ "ABsAAAAlAAEAAQAAAA0qtAAEtgAFugAGAACwAAAAAQAcAAAABgABAAAAJQABAB8AGgABABsAAAAv" +
+ "AAIAAQAAAA8qtAAEsgAHtgAIV7gACbEAAAABABwAAAAOAAMAAAApAAsAKwAOACwACAAgABoAAQAb" +
+ "AAAAKQABAAAAAAAJAbMACgGzAAuxAAAAAQAcAAAADgADAAAAGwAEABwACAAdAAMAIQAAAAIAIgAQ" +
+ "AAAAIgAEAAwALwAPAAkAAgAMABFAGQANAC8ANQQJAEcASgBIABkAKQAAAAgAAQAqAAEAKw==");
+ public static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+ "ZGV4CjAzNQAaR23N6WpunLRVX+BexSuzzNNiHNOvQpFoBwAAcAAAAHhWNBIAAAAAAAAAAKQGAAAq" +
+ "AAAAcAAAABEAAAAYAQAABQAAAFwBAAAEAAAAmAEAAAwAAAC4AQAAAQAAABgCAAAwBQAAOAIAACID" +
+ "AAA5AwAAQwMAAEsDAABPAwAAXAMAAGcDAABqAwAAbgMAAJEDAADCAwAA5QMAAPUDAAAZBAAAOQQA" +
+ "AFwEAAB7BAAAjgQAAKIEAAC4BAAAzAQAAOcEAAD8BAAAEQUAABwFAAAwBQAATwUAAF4FAABhBQAA" +
+ "ZAUAAGgFAABsBQAAeQUAAH4FAACGBQAAlgUAAKEFAACnBQAArwUAAMAFAADKBQAA0QUAAAgAAAAJ" +
+ "AAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAAGwAAABwA" +
+ "AAAeAAAABgAAAAsAAAAAAAAABwAAAAwAAAAMAwAABwAAAA0AAAAUAwAAGwAAAA4AAAAAAAAAHQAA" +
+ "AA8AAAAcAwAAAQABABcAAAACABAABAAAAAIAEAAFAAAAAgANACYAAAAAAAMAAgAAAAIAAwABAAAA" +
+ "AgADAAIAAAACAAMAIgAAAAIAAAAnAAAAAwADACMAAAAMAAMAAgAAAAwAAQAhAAAADAAAACcAAAAN" +
+ "AAQAIAAAAA0AAgAlAAAADQAAACcAAAACAAAAAQAAAAAAAAAEAwAAGgAAAIwGAABRBgAAAAAAAAQA" +
+ "AQACAAAA+gIAAB0AAABUMAMAbhALAAAADAAiAQwAcBAGAAEAGgIZAG4gBwAhAG4gBwABABoAAABu" +
+ "IAcAAQBuEAgAAQAMABEAAAABAAAAAAAAAPQCAAAGAAAAEgBpAAEAaQACAA4AAgABAAEAAADuAgAA" +
+ "DAAAAHAQAAABABwAAQBxEAoAAAAMAFsQAwAOAAMAAQACAAAA/gIAAAsAAABUIAMAYgEAAG4gCQAQ" +
+ "AHEABQAAAA4AIQAOPIcAGwAOPC0AJQAOACkADnk8AAEAAAAKAAAAAQAAAAsAAAABAAAACAAAAAEA" +
+ "AAAJABUgY3VycmVudDogUkVERUZJTkVEIH0ACDxjbGluaXQ+AAY8aW5pdD4AAj47AAtDTEFTU19C" +
+ "WVRFUwAJREVYX0JZVEVTAAFMAAJMTAAhTGFydC9UZXN0MTk1MyRBYnN0cmFjdFRlc3RPYmplY3Q7" +
+ "AC9MYXJ0L1Rlc3QxOTUzJFJlZGVmaW5lVGVzdE9iamVjdCRSZWRlZmluZVN0YXRlOwAhTGFydC9U" +
+ "ZXN0MTk1MyRSZWRlZmluZVRlc3RPYmplY3Q7AA5MYXJ0L1Rlc3QxOTUzOwAiTGRhbHZpay9hbm5v" +
+ "dGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ACFM" +
+ "ZGFsdmlrL2Fubm90YXRpb24vTWVtYmVyQ2xhc3NlczsAHUxkYWx2aWsvYW5ub3RhdGlvbi9TaWdu" +
+ "YXR1cmU7ABFMamF2YS9sYW5nL0NsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5n" +
+ "L1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7" +
+ "ABNMamF2YS91dGlsL0VudW1TZXQ7ABNMamF2YS91dGlsL0VudW1TZXQ8AAlSRURFRklORUQAElJl" +
+ "ZGVmaW5lVGVzdE9iamVjdAAdUmVkZWZpbmVUZXN0T2JqZWN0IHsgc3RhdGVzOiAADVRlc3QxOTUz" +
+ "LmphdmEAAVYAAVoAAlpMAAJbQgALYWNjZXNzRmxhZ3MAA2FkZAAGYXBwZW5kAA5jYWxsZWRGdW5j" +
+ "dGlvbgAJZG9Ob3RoaW5nAARuYW1lAAZub25lT2YAD3JlZGVmaW5lX3N0YXRlcwAIdG9TdHJpbmcA" +
+ "BXZhbHVlAFt+fkQ4eyJtaW4tYXBpIjoxLCJzaGEtMSI6IjUyNzNjM2RmZWUxMDQ2NzIwYWY0MjVm" +
+ "YTg1NTMxNmM5OWM4NmM4ZDIiLCJ2ZXJzaW9uIjoiMS4zLjE4LWRldiJ9AAIHASgcAxcWFwkXAwIE" +
+ "ASgYAwIFAh8ECSQXGAIGASgcARgBAgECAgEZARkDAQGIgASEBQGBgASgBQMByAUBAbgEAAAAAAAB" +
+ "AAAALgYAAAMAAAA6BgAAQAYAAEkGAAB8BgAAAQAAAAAAAAAAAAAAAwAAAHQGAAAQAAAAAAAAAAEA" +
+ "AAAAAAAAAQAAACoAAABwAAAAAgAAABEAAAAYAQAAAwAAAAUAAABcAQAABAAAAAQAAACYAQAABQAA" +
+ "AAwAAAC4AQAABgAAAAEAAAAYAgAAASAAAAQAAAA4AgAAAyAAAAQAAADuAgAAARAAAAQAAAAEAwAA" +
+ "AiAAACoAAAAiAwAABCAAAAQAAAAuBgAAACAAAAEAAABRBgAAAxAAAAMAAABwBgAABiAAAAEAAACM" +
+ "BgAAABAAAAEAAACkBgAA");
+
+ public EnumSet<RedefineState> redefine_states;
+ public RedefineTestObject() {
+ super();
+ redefine_states = EnumSet.noneOf(RedefineState.class);
+ }
+
+ public String toString() {
+ return "RedefineTestObject { states: " + redefine_states.toString() + " current: ORIGINAL }";
+ }
+
+ public void calledFunction() {
+ redefine_states.add(RedefineState.ORIGINAL); // line +0
+ // We will trigger the redefinition using a breakpoint on the next line.
+ doNothing(); // line +2
+ }
+ }
+
+ public static class ClassLoadObject implements TestRunnable {
+ public int cnt;
+ public int baseCallCnt;
+
+ public static final String[] CLASS_NAMES = new String[] {
+ "Lart/Test1953$ClassLoadObject$TC0;",
+ "Lart/Test1953$ClassLoadObject$TC1;",
+ "Lart/Test1953$ClassLoadObject$TC2;",
+ "Lart/Test1953$ClassLoadObject$TC3;",
+ "Lart/Test1953$ClassLoadObject$TC4;",
+ "Lart/Test1953$ClassLoadObject$TC5;",
+ "Lart/Test1953$ClassLoadObject$TC6;",
+ "Lart/Test1953$ClassLoadObject$TC7;",
+ "Lart/Test1953$ClassLoadObject$TC8;",
+ "Lart/Test1953$ClassLoadObject$TC9;",
+ };
+
+ private static int curClass = 0;
+
+ private static class TC0 { public static int foo; static { foo = 1; } }
+ private static class TC1 { public static int foo; static { foo = 2; } }
+ private static class TC2 { public static int foo; static { foo = 3; } }
+ private static class TC3 { public static int foo; static { foo = 4; } }
+ private static class TC4 { public static int foo; static { foo = 5; } }
+ private static class TC5 { public static int foo; static { foo = 6; } }
+ private static class TC6 { public static int foo; static { foo = 7; } }
+ private static class TC7 { public static int foo; static { foo = 8; } }
+ private static class TC8 { public static int foo; static { foo = 9; } }
+ private static class TC9 { public static int foo; static { foo = 10; } }
+
+ public ClassLoadObject() {
+ super();
+ cnt = 0;
+ baseCallCnt = 0;
+ }
+
+ public int getBaseCallCount() {
+ return baseCallCnt;
+ }
+
+ public void run() {
+ baseCallCnt++;
+ if (curClass == 0) {
+ $noprecompile$calledFunction0();
+ } else if (curClass == 1) {
+ $noprecompile$calledFunction1();
+ } else if (curClass == 2) {
+ $noprecompile$calledFunction2();
+ } else if (curClass == 3) {
+ $noprecompile$calledFunction3();
+ } else if (curClass == 4) {
+ $noprecompile$calledFunction4();
+ } else if (curClass == 5) {
+ $noprecompile$calledFunction5();
+ } else if (curClass == 6) {
+ $noprecompile$calledFunction6();
+ } else if (curClass == 7) {
+ $noprecompile$calledFunction7();
+ } else if (curClass == 8) {
+ $noprecompile$calledFunction8();
+ } else if (curClass == 9) {
+ $noprecompile$calledFunction9();
+ }
+ curClass++;
+ }
+
+ public Method getCalledMethod() throws Exception {
+      return this.getClass().getMethod("$noprecompile$calledFunction" + curClass);
+ }
+
+ // Give these all a tag to prevent 1954 from compiling them (and loading the class as a
+ // consequence).
+ public void $noprecompile$calledFunction0() {
+ cnt++;
+ System.out.println("TC0.foo == " + TC0.foo);
+ }
+
+ public void $noprecompile$calledFunction1() {
+ cnt++;
+ System.out.println("TC1.foo == " + TC1.foo);
+ }
+
+ public void $noprecompile$calledFunction2() {
+ cnt++;
+ System.out.println("TC2.foo == " + TC2.foo);
+ }
+
+ public void $noprecompile$calledFunction3() {
+ cnt++;
+ System.out.println("TC3.foo == " + TC3.foo);
+ }
+
+ public void $noprecompile$calledFunction4() {
+ cnt++;
+ System.out.println("TC4.foo == " + TC4.foo);
+ }
+
+ public void $noprecompile$calledFunction5() {
+ cnt++;
+ System.out.println("TC5.foo == " + TC5.foo);
+ }
+
+ public void $noprecompile$calledFunction6() {
+ cnt++;
+ System.out.println("TC6.foo == " + TC6.foo);
+ }
+
+ public void $noprecompile$calledFunction7() {
+ cnt++;
+ System.out.println("TC7.foo == " + TC7.foo);
+ }
+
+ public void $noprecompile$calledFunction8() {
+ cnt++;
+ System.out.println("TC8.foo == " + TC8.foo);
+ }
+
+ public void $noprecompile$calledFunction9() {
+ cnt++;
+ System.out.println("TC9.foo == " + TC9.foo);
+ }
+
+ public String toString() {
+ return "ClassLoadObject { cnt: " + cnt + ", curClass: " + curClass + "}";
+ }
+ }
+
+ public static class FieldBasedTestObject extends AbstractTestObject implements Runnable {
+ public int cnt;
+ public int TARGET_FIELD;
+ public FieldBasedTestObject() {
+ super();
+ cnt = 0;
+ TARGET_FIELD = 0;
+ }
+
+ public void calledFunction() {
+ cnt++;
+ // We put a watchpoint here and PopFrame when we are at it.
+ TARGET_FIELD += 10;
+ if (cnt == 1) { System.out.println("FAILED: No pop on first call!"); }
+ }
+
+ public String toString() {
+ return "FieldBasedTestObject { cnt: " + cnt + ", TARGET_FIELD: " + TARGET_FIELD + " }";
+ }
+ }
+
+ public static class StandardTestObject extends AbstractTestObject implements Runnable {
+ public int cnt;
+ public final boolean check;
+
+ public StandardTestObject(boolean check) {
+ super();
+ cnt = 0;
+ this.check = check;
+ }
+
+ public StandardTestObject() {
+ this(true);
+ }
+
+ public void calledFunction() {
+ cnt++; // line +0
+ // We put a breakpoint here and PopFrame when we are at it.
+ doNothing(); // line +2
+ if (check && cnt == 1) { System.out.println("FAILED: No pop on first call!"); }
+ }
+
+ public String toString() {
+ return "StandardTestObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class SynchronizedFunctionTestObject extends AbstractTestObject implements Runnable {
+ public int cnt;
+
+ public SynchronizedFunctionTestObject() {
+ super();
+ cnt = 0;
+ }
+
+ public synchronized void calledFunction() {
+ cnt++; // line +0
+ // We put a breakpoint here and PopFrame when we are at it.
+ doNothing(); // line +2
+ }
+
+ public String toString() {
+ return "SynchronizedFunctionTestObject { cnt: " + cnt + " }";
+ }
+ }
+ public static class SynchronizedTestObject extends AbstractTestObject implements Runnable {
+ public int cnt;
+ public final Object lock;
+
+ public SynchronizedTestObject() {
+ super();
+ cnt = 0;
+ lock = new Object();
+ }
+
+ public void calledFunction() {
+ synchronized (lock) { // line +0
+ cnt++; // line +1
+ // We put a breakpoint here and PopFrame when we are at it.
+ doNothing(); // line +3
+ }
+ }
+
+ public String toString() {
+ return "SynchronizedTestObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class ExceptionCatchTestObject extends AbstractTestObject implements Runnable {
+ public static class TestError extends Error {}
+
+ public int cnt;
+ public ExceptionCatchTestObject() {
+ super();
+ cnt = 0;
+ }
+
+ public void calledFunction() {
+ cnt++;
+ try {
+ doThrow();
+ } catch (TestError e) {
+ System.out.println(e.getClass().getName() + " caught in called function.");
+ }
+ }
+
+ public void doThrow() {
+ throw new TestError();
+ }
+
+ public String toString() {
+ return "ExceptionCatchTestObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class ExceptionThrowFarTestObject implements TestRunnable {
+ public static class TestError extends Error {}
+
+ public int cnt;
+ public int baseCallCnt;
+ public final boolean catchInCalled;
+ public ExceptionThrowFarTestObject(boolean catchInCalled) {
+ super();
+ cnt = 0;
+ baseCallCnt = 0;
+ this.catchInCalled = catchInCalled;
+ }
+
+ public int getBaseCallCount() {
+ return baseCallCnt;
+ }
+
+ public void run() {
+ baseCallCnt++;
+ try {
+ callingFunction();
+ } catch (TestError e) {
+ System.out.println(e.getClass().getName() + " thrown and caught!");
+ }
+ }
+
+ public void callingFunction() {
+ calledFunction();
+ }
+ public void calledFunction() {
+ cnt++;
+ if (catchInCalled) {
+ try {
+ throw new TestError(); // We put a watch here.
+ } catch (TestError e) {
+ System.out.println(e.getClass().getName() + " caught in same function.");
+ }
+ } else {
+ throw new TestError(); // We put a watch here.
+ }
+ }
+
+ public Method getCallingMethod() throws Exception {
+ return this.getClass().getMethod("callingFunction");
+ }
+
+ public Method getCalledMethod() throws Exception {
+ return this.getClass().getMethod("calledFunction");
+ }
+
+ public String toString() {
+ return "ExceptionThrowFarTestObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class ExceptionOnceObject extends AbstractTestObject {
+ public static final class TestError extends Error {}
+ public int cnt;
+ public final boolean throwInSub;
+ public ExceptionOnceObject(boolean throwInSub) {
+ super();
+ cnt = 0;
+ this.throwInSub = throwInSub;
+ }
+
+ public void calledFunction() {
+ cnt++;
+ if (cnt == 1) {
+ if (throwInSub) {
+ doThrow();
+ } else {
+ throw new TestError();
+ }
+ }
+ }
+
+ public void doThrow() {
+ throw new TestError();
+ }
+
+ public String toString() {
+ return "ExceptionOnceObject { cnt: " + cnt + ", throwInSub: " + throwInSub + " }";
+ }
+ }
+
+ public static class ExceptionThrowTestObject implements TestRunnable {
+ public static class TestError extends Error {}
+
+ public int cnt;
+ public int baseCallCnt;
+ public final boolean catchInCalled;
+ public ExceptionThrowTestObject(boolean catchInCalled) {
+ super();
+ cnt = 0;
+ baseCallCnt = 0;
+ this.catchInCalled = catchInCalled;
+ }
+
+ public int getBaseCallCount() {
+ return baseCallCnt;
+ }
+
+ public void run() {
+ baseCallCnt++;
+ try {
+ calledFunction();
+ } catch (TestError e) {
+ System.out.println(e.getClass().getName() + " thrown and caught!");
+ }
+ }
+
+ public void calledFunction() {
+ cnt++;
+ if (catchInCalled) {
+ try {
+ throw new TestError(); // We put a watch here.
+ } catch (TestError e) {
+ System.out.println(e.getClass().getName() + " caught in same function.");
+ }
+ } else {
+ throw new TestError(); // We put a watch here.
+ }
+ }
+
+ public Method getCalledMethod() throws Exception {
+ return this.getClass().getMethod("calledFunction");
+ }
+
+ public String toString() {
+ return "ExceptionThrowTestObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class NativeCalledObject extends AbstractTestObject {
+ public int cnt = 0;
+
+ public native void calledFunction();
+
+ public String toString() {
+ return "NativeCalledObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static class NativeCallerObject implements TestRunnable {
+ public int baseCnt = 0;
+ public int cnt = 0;
+
+ public int getBaseCallCount() {
+ return baseCnt;
+ }
+
+ public native void run();
+
+ public void calledFunction() {
+ cnt++;
+ // We will stop using a MethodExit event.
+ }
+
+ public Method getCalledMethod() throws Exception {
+ return this.getClass().getMethod("calledFunction");
+ }
+
+ public String toString() {
+ return "NativeCallerObject { cnt: " + cnt + " }";
+ }
+ }
+ public static class SuspendSuddenlyObject extends AbstractTestObject {
+ public volatile boolean stop_spinning = false;
+ public volatile boolean is_spinning = false;
+ public int cnt = 0;
+
+ public void calledFunction() {
+ cnt++;
+ while (!stop_spinning) {
+ is_spinning = true;
+ }
+ }
+
+ public String toString() {
+ return "SuspendSuddenlyObject { cnt: " + cnt + " }";
+ }
+ }
+
+ public static void run(boolean canRunClassLoadTests) throws Exception {
+ new Test1953(canRunClassLoadTests, (x)-> {}).runTests();
+ }
+
+  // This entrypoint is used by CTS only.
+ public static void run() throws Exception {
+ /* TODO: Due to the way that CTS tests are verified we cannot run class-load-tests since the
+ * verifier will be delayed until runtime and then load the classes all at once. This
+ * makes the test impossible to run.
+ */
+ run(/*canRunClassLoadTests*/ false);
+ }
+
+ public Test1953(boolean canRunClassLoadTests, Consumer<TestRunnable> preTest) {
+ this.canRunClassLoadTests = canRunClassLoadTests;
+ this.preTest = preTest;
+ }
+
+ private Consumer<TestRunnable> preTest;
+
+ public void runTests() throws Exception {
+ setupTest();
+
+ final Method calledFunction = StandardTestObject.class.getDeclaredMethod("calledFunction");
+ final Method doNothingMethod = Test1953.class.getDeclaredMethod("doNothing");
+ // Add a breakpoint on the second line after the start of the function
+ final int line = Breakpoint.locationToLine(calledFunction, 0) + 2;
+ final long loc = Breakpoint.lineToLocation(calledFunction, line);
+ System.out.println("Test stopped using breakpoint");
+ runTestOn(new StandardTestObject(),
+ (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
+ Test1953::clearSuspendBreakpointFor);
+
+ final Method syncFunctionCalledFunction =
+ SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
+ // Add a breakpoint on the second line after the start of the function
+ // Annoyingly r8 generally has the first instruction (a monitor enter) not be marked as being
+ // on any line but javac has it marked as being on the first line of the function. Just use the
+ // second entry on the line-number table to get the breakpoint. This should be good for both.
+ final long syncFunctionLoc =
+ Breakpoint.getLineNumberTable(syncFunctionCalledFunction)[1].location;
+ System.out.println("Test stopped using breakpoint with declared synchronized function");
+ runTestOn(new SynchronizedFunctionTestObject(),
+ (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
+ Test1953::clearSuspendBreakpointFor);
+
+ final Method syncCalledFunction =
+ SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the third line after the start of the function
+ final int syncLine = Breakpoint.locationToLine(syncCalledFunction, 0) + 3;
+ final long syncLoc = Breakpoint.lineToLocation(syncCalledFunction, syncLine);
+ System.out.println("Test stopped using breakpoint with synchronized block");
+ runTestOn(new SynchronizedTestObject(),
+ (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
+ Test1953::clearSuspendBreakpointFor);
+
+ System.out.println("Test stopped on single step");
+ runTestOn(new StandardTestObject(),
+ (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
+ Test1953::clearSuspendSingleStepFor);
+
+ final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
+ System.out.println("Test stopped on field access");
+ runTestOn(new FieldBasedTestObject(),
+ (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
+ Test1953::clearFieldSuspendFor);
+
+ System.out.println("Test stopped on field modification");
+ runTestOn(new FieldBasedTestObject(),
+ (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
+ Test1953::clearFieldSuspendFor);
+
+ System.out.println("Test stopped during Method Exit of doNothing");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ false, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ // NB We need another test to make sure the MethodEntered event is triggered twice.
+ System.out.println("Test stopped during Method Enter of doNothing");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ true, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ System.out.println("Test stopped during Method Exit of calledFunction");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ false, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ System.out.println("Test stopped during Method Enter of calledFunction");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ true, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ final Method exceptionOnceCalledMethod =
+ ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
+ System.out.println("Test stopped during Method Exit due to exception thrown in same function");
+ runTestOn(new ExceptionOnceObject(/*throwInSub*/ false),
+ (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
+ runTestOn(new ExceptionOnceObject(/*throwInSub*/ true),
+ (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
+ Test1953::clearSuspendMethodEvent);
+
+ System.out.println("Test stopped during notifyFramePop without exception on pop of calledFunction");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendPopFrameEvent(1, doNothingMethod, thr),
+ Test1953::clearSuspendPopFrameEvent);
+
+ System.out.println("Test stopped during notifyFramePop without exception on pop of doNothing");
+ runTestOn(new StandardTestObject(false),
+ (thr) -> setupSuspendPopFrameEvent(0, doNothingMethod, thr),
+ Test1953::clearSuspendPopFrameEvent);
+
+ final Method exceptionThrowCalledMethod =
+ ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
+ System.out.println("Test stopped during notifyFramePop with exception on pop of calledFunction");
+ runTestOn(new ExceptionThrowTestObject(false),
+ (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
+ Test1953::clearSuspendPopFrameEvent);
+
+ final Method exceptionCatchThrowMethod =
+ ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
+ System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
+ runTestOn(new ExceptionCatchTestObject(),
+ (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
+ Test1953::clearSuspendPopFrameEvent);
+
+ System.out.println("Test stopped during ExceptionCatch event of calledFunction " +
+ "(catch in called function, throw in called function)");
+ runTestOn(new ExceptionThrowTestObject(true),
+ (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ true, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ final Method exceptionCatchCalledMethod =
+ ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
+ System.out.println("Test stopped during ExceptionCatch event of calledFunction " +
+ "(catch in called function, throw in subroutine)");
+ runTestOn(new ExceptionCatchTestObject(),
+ (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /*catch*/ true, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ System.out.println("Test stopped during Exception event of calledFunction " +
+ "(catch in calling function)");
+ runTestOn(new ExceptionThrowTestObject(false),
+ (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ System.out.println("Test stopped during Exception event of calledFunction " +
+ "(catch in called function)");
+ runTestOn(new ExceptionThrowTestObject(true),
+ (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ final Method exceptionThrowFarCalledMethod =
+ ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
+ System.out.println("Test stopped during Exception event of calledFunction " +
+ "(catch in parent of calling function)");
+ runTestOn(new ExceptionThrowFarTestObject(false),
+ (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ System.out.println("Test stopped during Exception event of calledFunction " +
+ "(catch in called function)");
+ runTestOn(new ExceptionThrowFarTestObject(true),
+ (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
+ Test1953::clearSuspendExceptionEvent);
+
+ // These tests are disabled for either the RI (b/116003018) or for jvmti-stress. For the
+    // latter it is due to the additional agent causing classes to be loaded earlier as it forces
+ // deeper verification during class redefinition, causing failures.
+ // NB the agent is prevented from popping frames in either of these events in ART. See
+ // b/117615146 for more information about this restriction.
+ if (canRunClassLoadTests && CanRunClassLoadingTests()) {
+ // This test doesn't work on RI since the RI disallows use of PopFrame during a ClassLoad
+ // event. See b/116003018 for more information.
+ System.out.println("Test stopped during a ClassLoad event.");
+ runTestOn(new ClassLoadObject(),
+ (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_LOAD, ClassLoadObject.CLASS_NAMES, thr),
+ Test1953::clearSuspendClassEvent);
+
+ // The RI handles a PopFrame during a ClassPrepare event incorrectly. See b/116003018 for
+ // more information.
+ System.out.println("Test stopped during a ClassPrepare event.");
+ runTestOn(new ClassLoadObject(),
+ (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_PREPARE,
+ ClassLoadObject.CLASS_NAMES,
+ thr),
+ Test1953::clearSuspendClassEvent);
+ }
+ System.out.println("Test stopped during random Suspend.");
+ final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
+ runTestOn(
+ sso,
+ new TestSuspender() {
+ public void setup(Thread thr) { }
+ public void waitForSuspend(Thread thr) {
+ while (!sso.is_spinning) {}
+ Suspension.suspend(thr);
+ }
+ public void cleanup(Thread thr) {
+ sso.stop_spinning = true;
+ }
+ });
+
+ final Method redefineCalledFunction =
+ RedefineTestObject.class.getDeclaredMethod("calledFunction");
+ final int redefLine = Breakpoint.locationToLine(redefineCalledFunction, 0) + 2;
+ final long redefLoc = Breakpoint.lineToLocation(redefineCalledFunction, redefLine);
+ System.out.println("Test redefining frame being popped.");
+ runTestOn(new RedefineTestObject(),
+ (thr) -> setupSuspendBreakpointFor(redefineCalledFunction, redefLoc, thr),
+ (thr) -> {
+ clearSuspendBreakpointFor(thr);
+ Redefinition.doCommonClassRedefinition(RedefineTestObject.class,
+ RedefineTestObject.CLASS_BYTES,
+ RedefineTestObject.DEX_BYTES);
+ });
+
+ System.out.println("Test stopped during a native method fails");
+ runTestOn(new NativeCalledObject(),
+ Test1953::setupWaitForNativeCall,
+ Test1953::clearWaitForNativeCall);
+
+ System.out.println("Test stopped in a method called by native fails");
+ final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
+ runTestOn(new NativeCallerObject(),
+ (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /*enter*/ false, thr),
+ Test1953::clearSuspendMethodEvent);
+ }
+
+ // Volatile is to prevent any future optimizations that could invalidate this test by doing
+ // constant propagation and eliminating the failing paths before the verifier is able to load the
+ // class.
+ static volatile boolean ranClassLoadTest = false;
+ static boolean classesPreverified = false;
+ private static final class RCLT0 { public void foo() {} }
+ private static final class RCLT1 { public void foo() {} }
+  // If classes are not preverified for some reason (interp-ac, no-image, etc.) the verifier will
+ // actually load classes as it runs. This means that we cannot use the class-load tests as they
+ // are written. TODO Support this.
+ public boolean CanRunClassLoadingTests() {
+ if (ranClassLoadTest) {
+ return classesPreverified;
+ }
+ if (!ranClassLoadTest) {
+ // Only this will ever be executed.
+ new RCLT0().foo();
+ } else {
+ // This will never be executed. If classes are not preverified the verifier will load RCLT1
+ // when the enclosing method is run. This behavior makes the class-load/prepare test cases
+ // impossible to successfully run (they will deadlock).
+ new RCLT1().foo();
+ System.out.println("FAILURE: UNREACHABLE Location!");
+ }
+ classesPreverified = !isClassLoaded("Lart/Test1953$RCLT1;");
+ ranClassLoadTest = true;
+ return classesPreverified;
+ }
+
+ public static native boolean isClassLoaded(String name);
+
+ public static native void setupTest();
+ public static native void popFrame(Thread thr);
+
+ public static native void setupSuspendBreakpointFor(Executable meth, long loc, Thread thr);
+ public static native void clearSuspendBreakpointFor(Thread thr);
+
+ public static native void setupSuspendSingleStepAt(Executable meth, long loc, Thread thr);
+ public static native void clearSuspendSingleStepFor(Thread thr);
+
+ public static native void setupFieldSuspendFor(Class klass, Field f, boolean access, Thread thr);
+ public static native void clearFieldSuspendFor(Thread thr);
+
+ public static native void setupSuspendMethodEvent(Executable meth, boolean enter, Thread thr);
+ public static native void clearSuspendMethodEvent(Thread thr);
+
+ public static native void setupSuspendExceptionEvent(
+ Executable meth, boolean is_catch, Thread thr);
+ public static native void clearSuspendExceptionEvent(Thread thr);
+
+ public static native void setupSuspendPopFrameEvent(
+ int offset, Executable breakpointFunction, Thread thr);
+ public static native void clearSuspendPopFrameEvent(Thread thr);
+
+ public static final int EVENT_TYPE_CLASS_LOAD = 55;
+ public static final int EVENT_TYPE_CLASS_PREPARE = 56;
+ public static native void setupSuspendClassEvent(
+ int eventType, String[] interestingNames, Thread thr);
+ public static native void clearSuspendClassEvent(Thread thr);
+
+ public static native void setupWaitForNativeCall(Thread thr);
+ public static native void clearWaitForNativeCall(Thread thr);
+
+ public static native void waitForSuspendHit(Thread thr);
+}
diff --git a/test/1954-pop-frame-jit/check b/test/1954-pop-frame-jit/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1954-pop-frame-jit/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+ (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1954-pop-frame-jit/expected.txt b/test/1954-pop-frame-jit/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1954-pop-frame-jit/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1954-pop-frame-jit/info.txt b/test/1954-pop-frame-jit/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1954-pop-frame-jit/info.txt
@@ -0,0 +1,7 @@
+Test JVMTI PopFrame behavior when the methods involved are JIT compiled.
+
+This test stops a thread in a variety of ways (breakpoints, single-stepping,
+field access/modification, method and exception events, class events, etc.),
+pops the topmost frame with PopFrame, and checks that the popped method runs
+again from its first instruction. Every method of the test object is forced
+through the JIT before the test begins.
diff --git a/test/1954-pop-frame-jit/jvm-expected.patch b/test/1954-pop-frame-jit/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1954-pop-frame-jit/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1954-pop-frame-jit/run b/test/1954-pop-frame-jit/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1954-pop-frame-jit/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1954-pop-frame-jit/src/Main.java b/test/1954-pop-frame-jit/src/Main.java
new file mode 100644
index 0000000..12defcd
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+ public Main(boolean run_class_load_tests) {
+ super(run_class_load_tests, (testObj) -> {
+ try {
+        // Make sure every method of the test object gets JIT compiled. We do this before calling
+        // setup since the suspend setup might make it impossible to JIT the methods (for example
+        // by setting breakpoints, which can keep them out of the JIT).
+ for (Method m : testObj.getClass().getMethods()) {
+ if ((m.getModifiers() & Modifier.NATIVE) == 0 &&
+ !m.getName().startsWith("$noprecompile$")) {
+ ensureMethodJitCompiled(m);
+ }
+ }
+ } catch (Exception e) {}
+ });
+ }
+
+ public static void main(String[] args) throws Exception {
+ new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+ }
+
+ public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1954-pop-frame-jit/src/art/Breakpoint.java b/test/1954-pop-frame-jit/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
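The Manager above deduplicates set and clear requests, while lineToLocation and locationToLine translate between source lines and dex locations. A hedged usage sketch follows; actually receiving breakpoint events would additionally require the startBreakpointWatch/stopBreakpointWatch natives declared earlier, whose callback wiring is omitted here:

import java.lang.reflect.Method;

import art.Breakpoint;

class BreakpointManagerSketch {
  // Illustrative only; the method and line number come from the caller.
  static void toggle(Method method, int line) throws Exception {
    Breakpoint.Manager manager = new Breakpoint.Manager();
    long loc = Breakpoint.lineToLocation(method, line);   // source line -> dex location
    manager.setBreakpoint(method, loc);                   // installed only once per (method, location)
    System.out.println("breakpoint set at line " + Breakpoint.locationToLine(method, loc));
    manager.clearAllBreakpoints();                        // removes everything this manager set
  }
}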
diff --git a/test/1954-pop-frame-jit/src/art/Redefinition.java b/test/1954-pop-frame-jit/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
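doMultiClassRedefinition simply fans the varargs list of CommonClassDefinition records out into the parallel arrays the native entry point expects. A minimal calling sketch, assuming the caller supplies matching class-file and dex-file bytes (the tests themselves typically embed these as encoded constants):

import art.Redefinition;
import art.Redefinition.CommonClassDefinition;

class RedefineSketch {
  // Illustrative only; classBytes and dexBytes must contain a real .class file and
  // dex file for the target class, which this sketch does not provide.
  static void redefine(Class<?> target, byte[] classBytes, byte[] dexBytes) {
    Redefinition.doMultiClassRedefinition(
        new CommonClassDefinition(target, classBytes, dexBytes));
  }
}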
diff --git a/test/1954-pop-frame-jit/src/art/StackTrace.java b/test/1954-pop-frame-jit/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+ public static class StackFrameData {
+ public final Thread thr;
+ public final Executable method;
+ public final long current_location;
+ public final int depth;
+
+ public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+ this.thr = thr;
+ this.method = e;
+ this.current_location = loc;
+ this.depth = depth;
+ }
+ @Override
+ public String toString() {
+ return String.format(
+ "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+ this.thr,
+ this.method,
+ this.current_location,
+ this.depth);
+ }
+ }
+
+ public static native int GetStackDepth(Thread thr);
+
+ private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+ public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says an unsuspended thread is fine, but to keep the results consistent
+    // we suspend the thread ourselves when running on the RI.
+ boolean suspend_thread =
+ !System.getProperty("java.vm.name").equals("Dalvik") &&
+ !thr.equals(Thread.currentThread()) &&
+ !Suspension.isSuspended(thr);
+ if (suspend_thread) {
+ Suspension.suspend(thr);
+ }
+ StackFrameData[] out = nativeGetStackTrace(thr);
+ if (suspend_thread) {
+ Suspension.resume(thr);
+ }
+ return out;
+ }
+}
+
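A short hypothetical sketch combining the stack helpers above with Breakpoint.locationToLine to print a thread's frames; GetStackTrace already takes care of suspending the thread on the RI, so the caller does not have to:

import art.Breakpoint;
import art.StackTrace;

class StackTraceSketch {
  // Illustrative only.
  static void dump(Thread thr) {
    System.out.println("depth = " + StackTrace.GetStackDepth(thr));
    for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
      int line = Breakpoint.locationToLine(frame.method, frame.current_location);
      System.out.println("  #" + frame.depth + " " + frame.method + " (line " + line + ")");
    }
  }
}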
diff --git a/test/1954-pop-frame-jit/src/art/Suspension.java b/test/1954-pop-frame-jit/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
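The list variants above suspend or resume several threads at once and return one int per thread, presumably the corresponding JVMTI error codes. A tiny illustrative sketch, assuming a group of already running worker threads:

import art.Suspension;

class SuspensionSketch {
  // Illustrative only; a return value of 0 is assumed to mean JVMTI_ERROR_NONE.
  static void pauseAndResume(Thread... workers) {
    int[] errs = Suspension.suspendList(workers);
    for (int i = 0; i < workers.length; i++) {
      if (errs[i] != 0 || !Suspension.isSuspended(workers[i])) {
        throw new IllegalStateException(workers[i] + " failed to suspend");
      }
    }
    Suspension.resumeList(workers);
  }
}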
diff --git a/test/1954-pop-frame-jit/src/art/Test1953.java b/test/1954-pop-frame-jit/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/check b/test/1955-pop-frame-jit-called/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+ (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1955-pop-frame-jit-called/expected.txt b/test/1955-pop-frame-jit-called/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1955-pop-frame-jit-called/info.txt b/test/1955-pop-frame-jit-called/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/info.txt
@@ -0,0 +1,7 @@
+Test JVMTI PopFrame behavior when the method being popped is JIT compiled.
+
+This test stops a thread in a variety of ways (breakpoints, single-stepping,
+field access/modification, method and exception events, class events, etc.),
+pops the topmost frame with PopFrame, and checks that the popped method runs
+again from its first instruction. Only the called method that will be popped
+is forced through the JIT; a high -Xjitthreshold keeps the JIT off the rest.
diff --git a/test/1955-pop-frame-jit-called/jvm-expected.patch b/test/1955-pop-frame-jit-called/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1955-pop-frame-jit-called/run b/test/1955-pop-frame-jit-called/run
new file mode 100755
index 0000000..2984461
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/run
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+# The jitthreshold option prevents the JIT from compiling any methods except the
+# ones we explicitly request (via ensureMethodJitCompiled in Main).
+./default-run "$@" --android-runtime-option -Xjitthreshold:1000 --jvmti $ARGS
diff --git a/test/1955-pop-frame-jit-called/src/Main.java b/test/1955-pop-frame-jit-called/src/Main.java
new file mode 100644
index 0000000..30a42ea
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+ public Main(boolean run_class_load_tests) {
+ super(run_class_load_tests, (testObj) -> {
+ try {
+ // Make sure the called method is jitted
+ ensureMethodJitCompiled(testObj.getCalledMethod());
+ } catch (Exception e) {}
+ });
+ }
+
+ public static void main(String[] args) throws Exception {
+ new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+ }
+
+ public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/Breakpoint.java b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
diff --git a/test/1955-pop-frame-jit-called/src/art/Redefinition.java b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+ // Transforms the class
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/StackTrace.java b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+ public static class StackFrameData {
+ public final Thread thr;
+ public final Executable method;
+ public final long current_location;
+ public final int depth;
+
+ public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+ this.thr = thr;
+ this.method = e;
+ this.current_location = loc;
+ this.depth = depth;
+ }
+ @Override
+ public String toString() {
+ return String.format(
+ "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+ this.thr,
+ this.method,
+ this.current_location,
+ this.depth);
+ }
+ }
+
+ public static native int GetStackDepth(Thread thr);
+
+ private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+ public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says an unsuspended thread is fine, but to keep the results consistent
+    // we suspend the thread ourselves when running on the RI.
+ boolean suspend_thread =
+ !System.getProperty("java.vm.name").equals("Dalvik") &&
+ !thr.equals(Thread.currentThread()) &&
+ !Suspension.isSuspended(thr);
+ if (suspend_thread) {
+ Suspension.suspend(thr);
+ }
+ StackFrameData[] out = nativeGetStackTrace(thr);
+ if (suspend_thread) {
+ Suspension.resume(thr);
+ }
+ return out;
+ }
+}
+
diff --git a/test/1955-pop-frame-jit-called/src/art/Suspension.java b/test/1955-pop-frame-jit-called/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1955-pop-frame-jit-called/src/art/Test1953.java b/test/1955-pop-frame-jit-called/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/check b/test/1956-pop-frame-jit-calling/check
new file mode 100755
index 0000000..10b87cc
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+ (patch -p0 expected.txt < jvm-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1956-pop-frame-jit-calling/expected.txt b/test/1956-pop-frame-jit-calling/expected.txt
new file mode 100644
index 0000000..a20a045
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/expected.txt
@@ -0,0 +1,118 @@
+Test stopped using breakpoint
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with declared synchronized function
+Single call with PopFrame on SynchronizedFunctionTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedFunctionTestObject { cnt: 2 } base-call count: 1
+Test stopped using breakpoint with synchronized block
+Single call with PopFrame on SynchronizedTestObject { cnt: 0 } base-call-count: 0
+result is SynchronizedTestObject { cnt: 2 } base-call count: 1
+Test stopped on single step
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped on field access
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped on field modification
+Single call with PopFrame on FieldBasedTestObject { cnt: 0, TARGET_FIELD: 0 } base-call-count: 0
+result is FieldBasedTestObject { cnt: 2, TARGET_FIELD: 10 } base-call count: 1
+Test stopped during Method Exit of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Enter of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during Method Enter of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during Method Exit due to exception thrown in same function
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: false } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: false } base-call count: 1
+Test stopped during Method Exit due to exception thrown in subroutine
+Single call with PopFrame on ExceptionOnceObject { cnt: 0, throwInSub: true } base-call-count: 0
+result is ExceptionOnceObject { cnt: 2, throwInSub: true } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of calledFunction
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop without exception on pop of doNothing
+Single call with PopFrame on StandardTestObject { cnt: 0 } base-call-count: 0
+result is StandardTestObject { cnt: 1 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of calledFunction
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during notifyFramePop with exception on pop of doThrow
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 1 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+Single call with PopFrame on ExceptionCatchTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionCatchTestObject$TestError caught in called function.
+result is ExceptionCatchTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in calling function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError thrown and caught!
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowTestObject$TestError caught in same function.
+result is ExceptionThrowTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError thrown and caught!
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during Exception event of calledFunction (catch in called function)
+Single call with PopFrame on ExceptionThrowFarTestObject { cnt: 0 } base-call-count: 0
+art.Test1953$ExceptionThrowFarTestObject$TestError caught in same function.
+result is ExceptionThrowFarTestObject { cnt: 2 } base-call count: 1
+Test stopped during a ClassLoad event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC0.foo == 1
+result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+Test stopped during a ClassPrepare event.
+Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+TC1.foo == 2
+result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
+Test stopped during random Suspend.
+Single call with PopFrame on SuspendSuddenlyObject { cnt: 0 } base-call-count: 0
+result is SuspendSuddenlyObject { cnt: 2 } base-call count: 1
+Test redefining frame being popped.
+Single call with PopFrame on RedefineTestObject { states: [] current: ORIGINAL } base-call-count: 0
+result is RedefineTestObject { states: [ORIGINAL, REDEFINED] current: REDEFINED } base-call count: 1
+Test stopped during a native method fails
+Single call with PopFrame on NativeCalledObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCalledObject { cnt: 1 } base-call count: 1
+Test stopped in a method called by native fails
+Single call with PopFrame on NativeCallerObject { cnt: 0 } base-call-count: 0
+Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+ art.Test1953.popFrame(Native Method)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTestOn(Test1953.java)
+ art.Test1953.runTests(Test1953.java)
+ <Additional frames hidden>
+result is NativeCallerObject { cnt: 1 } base-call count: 1
diff --git a/test/1956-pop-frame-jit-calling/info.txt b/test/1956-pop-frame-jit-calling/info.txt
new file mode 100644
index 0000000..b5eb546
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/info.txt
@@ -0,0 +1,7 @@
+Test JVMTI PopFrame functionality when the calling function is JIT compiled.
+
+This test stops the target method in a variety of ways (breakpoints, single
+stepping, field watches, method entry/exit events, exceptions and so on), pops
+its frame, and checks that the method is re-executed from the beginning.
+
+The function that calls the target is explicitly JIT compiled before each case.
diff --git a/test/1956-pop-frame-jit-calling/jvm-expected.patch b/test/1956-pop-frame-jit-calling/jvm-expected.patch
new file mode 100644
index 0000000..718f8ad
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/jvm-expected.patch
@@ -0,0 +1,21 @@
+75,94d74
+< Test stopped during a ClassLoad event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 0} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC0.foo == 1
+< result is ClassLoadObject { cnt: 1, curClass: 1} base-call count: 1
+< Test stopped during a ClassPrepare event.
+< Single call with PopFrame on ClassLoadObject { cnt: 0, curClass: 1} base-call-count: 0
+< Failed to pop frame due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+< art.Test1953.popFrame(Native Method)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTestOn(Test1953.java)
+< art.Test1953.runTests(Test1953.java)
+< <Additional frames hidden>
+< TC1.foo == 2
+< result is ClassLoadObject { cnt: 1, curClass: 2} base-call count: 1
diff --git a/test/1956-pop-frame-jit-calling/run b/test/1956-pop-frame-jit-calling/run
new file mode 100755
index 0000000..2984461
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/run
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+ ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+# The high jit threshold prevents the JIT from compiling anything except the
+# methods we explicitly request.
+./default-run "$@" --android-runtime-option -Xjitthreshold:1000 --jvmti $ARGS
diff --git a/test/1956-pop-frame-jit-calling/src/Main.java b/test/1956-pop-frame-jit-calling/src/Main.java
new file mode 100644
index 0000000..c44e035
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+import java.time.Duration;
+
+import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Stack;
+import java.util.Vector;
+
+import java.util.function.Supplier;
+
+import art.*;
+
+public class Main extends Test1953 {
+ public Main(boolean run_class_load_tests) {
+ super(run_class_load_tests, (testObj) -> {
+ try {
+ // Make sure the calling method is jitted
+ ensureMethodJitCompiled(testObj.getCallingMethod());
+ } catch (Exception e) {}
+ });
+ }
+
+ public static void main(String[] args) throws Exception {
+ new Main(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS")).runTests();
+ }
+
+ public static native void ensureMethodJitCompiled(Method meth);
+}
diff --git a/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
new file mode 100644
index 0000000..bbb89f7
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.Objects;
+
+public class Breakpoint {
+ public static class Manager {
+ public static class BP {
+ public final Executable method;
+ public final long location;
+
+ public BP(Executable method) {
+ this(method, getStartLocation(method));
+ }
+
+ public BP(Executable method, long location) {
+ this.method = method;
+ this.location = location;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ return (other instanceof BP) &&
+ method.equals(((BP)other).method) &&
+ location == ((BP)other).location;
+ }
+
+ @Override
+ public String toString() {
+ return method.toString() + " @ " + getLine();
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(method, location);
+ }
+
+ public int getLine() {
+ try {
+ LineNumber[] lines = getLineNumberTable(method);
+ int best = -1;
+ for (LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+ }
+
+ private Set<BP> breaks = new HashSet<>();
+
+ public void setBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.add(b)) {
+ Breakpoint.setBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void setBreakpoint(Executable method, long location) {
+ setBreakpoints(new BP(method, location));
+ }
+
+ public void clearBreakpoints(BP... bs) {
+ for (BP b : bs) {
+ if (breaks.remove(b)) {
+ Breakpoint.clearBreakpoint(b.method, b.location);
+ }
+ }
+ }
+ public void clearBreakpoint(Executable method, long location) {
+ clearBreakpoints(new BP(method, location));
+ }
+
+ public void clearAllBreakpoints() {
+ clearBreakpoints(breaks.toArray(new BP[0]));
+ }
+ }
+
+ public static void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ Thread thr) {
+ startBreakpointWatch(methodClass, breakpointReached, false, thr);
+ }
+
+ /**
+ * Enables the trapping of breakpoint events.
+ *
+ * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
+ */
+ public static native void startBreakpointWatch(Class<?> methodClass,
+ Executable breakpointReached,
+ boolean allowRecursive,
+ Thread thr);
+ public static native void stopBreakpointWatch(Thread thr);
+
+ public static final class LineNumber implements Comparable<LineNumber> {
+ public final long location;
+ public final int line;
+
+ private LineNumber(long loc, int line) {
+ this.location = loc;
+ this.line = line;
+ }
+
+ public boolean equals(Object other) {
+ return other instanceof LineNumber && ((LineNumber)other).line == line &&
+ ((LineNumber)other).location == location;
+ }
+
+ public int compareTo(LineNumber other) {
+ int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
+ if (v != 0) {
+ return v;
+ } else {
+ return Long.valueOf(location).compareTo(Long.valueOf(other.location));
+ }
+ }
+ }
+
+ public static native void setBreakpoint(Executable m, long loc);
+ public static void setBreakpoint(Executable m, LineNumber l) {
+ setBreakpoint(m, l.location);
+ }
+
+ public static native void clearBreakpoint(Executable m, long loc);
+ public static void clearBreakpoint(Executable m, LineNumber l) {
+ clearBreakpoint(m, l.location);
+ }
+
+ private static native Object[] getLineNumberTableNative(Executable m);
+ public static LineNumber[] getLineNumberTable(Executable m) {
+ Object[] nativeTable = getLineNumberTableNative(m);
+ long[] location = (long[])(nativeTable[0]);
+ int[] lines = (int[])(nativeTable[1]);
+ if (lines.length != location.length) {
+ throw new Error("Lines and locations have different lengths!");
+ }
+ LineNumber[] out = new LineNumber[lines.length];
+ for (int i = 0; i < lines.length; i++) {
+ out[i] = new LineNumber(location[i], lines[i]);
+ }
+ return out;
+ }
+
+ public static native long getStartLocation(Executable m);
+
+ public static int locationToLine(Executable m, long location) {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ int best = -1;
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.location > location) {
+ break;
+ } else {
+ best = l.line;
+ }
+ }
+ return best;
+ } catch (Exception e) {
+ return -1;
+ }
+ }
+
+ public static long lineToLocation(Executable m, int line) throws Exception {
+ try {
+ Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
+ for (Breakpoint.LineNumber l : lines) {
+ if (l.line == line) {
+ return l.location;
+ }
+ }
+ throw new Exception("Unable to find line " + line + " in " + m);
+ } catch (Exception e) {
+ throw new Exception("Unable to get line number info for " + m, e);
+ }
+ }
+}
+
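For illustration only (not part of this change): a minimal sketch of how a test might drive the Breakpoint helper above. The class BreakpointUsage, the handler onBreakpoint and runWithBreakpoint are hypothetical names, and the handler signature is an assumption; only art.Breakpoint APIs shown in this file are used.

import java.lang.reflect.Executable;

import art.Breakpoint;

public class BreakpointUsage {
  // Hypothetical handler; what arguments the agent actually delivers to the
  // handler is an assumption here, so it takes none.
  public static void onBreakpoint() { }

  public static void runWithBreakpoint(Executable target, int line, Runnable body)
      throws Exception {
    Breakpoint.Manager manager = new Breakpoint.Manager();
    // Map the requested source line to a dex location, then arm the breakpoint.
    manager.setBreakpoint(target, Breakpoint.lineToLocation(target, line));
    // Route breakpoint events on the current thread to the handler above.
    Breakpoint.startBreakpointWatch(
        BreakpointUsage.class,
        BreakpointUsage.class.getDeclaredMethod("onBreakpoint"),
        Thread.currentThread());
    try {
      body.run();
    } finally {
      Breakpoint.stopBreakpointWatch(Thread.currentThread());
      manager.clearAllBreakpoints();
    }
  }
}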
diff --git a/test/1956-pop-frame-jit-calling/src/art/Redefinition.java b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
new file mode 100644
index 0000000..56d2938
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+ public static final class CommonClassDefinition {
+ public final Class<?> target;
+ public final byte[] class_file_bytes;
+ public final byte[] dex_file_bytes;
+
+ public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+ this.target = target;
+ this.class_file_bytes = class_file_bytes;
+ this.dex_file_bytes = dex_file_bytes;
+ }
+ }
+
+  // A set of possible test configurations. Tests should set this if they need to.
+ // This must be kept in sync with the defines in ti-agent/common_helper.cc
+ public static enum Config {
+ COMMON_REDEFINE(0),
+ COMMON_RETRANSFORM(1),
+ COMMON_TRANSFORM(2);
+
+ private final int val;
+ private Config(int val) {
+ this.val = val;
+ }
+ }
+
+ public static void setTestConfiguration(Config type) {
+ nativeSetTestConfiguration(type.val);
+ }
+
+ private static native void nativeSetTestConfiguration(int type);
+
+  // Redefines the given class using the provided class-file and dex-file bytes.
+ public static native void doCommonClassRedefinition(Class<?> target,
+ byte[] classfile,
+ byte[] dexfile);
+
+ public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+ ArrayList<Class<?>> classes = new ArrayList<>();
+ ArrayList<byte[]> class_files = new ArrayList<>();
+ ArrayList<byte[]> dex_files = new ArrayList<>();
+
+ for (CommonClassDefinition d : defs) {
+ classes.add(d.target);
+ class_files.add(d.class_file_bytes);
+ dex_files.add(d.dex_file_bytes);
+ }
+ doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+ class_files.toArray(new byte[0][]),
+ dex_files.toArray(new byte[0][]));
+ }
+
+ public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+ for (CommonClassDefinition d : defs) {
+ addCommonTransformationResult(d.target.getCanonicalName(),
+ d.class_file_bytes,
+ d.dex_file_bytes);
+ }
+ }
+
+ public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+ byte[][] classfiles,
+ byte[][] dexfiles);
+ public static native void doCommonClassRetransformation(Class<?>... target);
+ public static native void setPopRetransformations(boolean pop);
+ public static native void popTransformationFor(String name);
+ public static native void enableCommonRetransformation(boolean enable);
+ public static native void addCommonTransformationResult(String target_name,
+ byte[] class_bytes,
+ byte[] dex_bytes);
+}
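For illustration only (not part of this change): a minimal sketch of using the Redefinition helper above. RedefinitionUsage is a hypothetical name and the byte arrays are placeholders; real tests embed base64-decoded class-file and dex bytes of the already-compiled replacement class.

import art.Redefinition;
import art.Redefinition.CommonClassDefinition;

public class RedefinitionUsage {
  // Placeholders only; a real test bakes in the bytes of the replacement class.
  private static final byte[] CLASS_BYTES = new byte[0];
  private static final byte[] DEX_BYTES = new byte[0];

  public static void redefine(Class<?> target) {
    // Bundle the target class with its replacement bytes; several definitions
    // can be passed at once to redefine a group of classes together.
    Redefinition.doMultiClassRedefinition(
        new CommonClassDefinition(target, CLASS_BYTES, DEX_BYTES));
  }
}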
diff --git a/test/1956-pop-frame-jit-calling/src/art/StackTrace.java b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
new file mode 100644
index 0000000..2ea2f20
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Executable;
+
+public class StackTrace {
+ public static class StackFrameData {
+ public final Thread thr;
+ public final Executable method;
+ public final long current_location;
+ public final int depth;
+
+ public StackFrameData(Thread thr, Executable e, long loc, int depth) {
+ this.thr = thr;
+ this.method = e;
+ this.current_location = loc;
+ this.depth = depth;
+ }
+ @Override
+ public String toString() {
+ return String.format(
+ "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
+ this.thr,
+ this.method,
+ this.current_location,
+ this.depth);
+ }
+ }
+
+ public static native int GetStackDepth(Thread thr);
+
+ private static native StackFrameData[] nativeGetStackTrace(Thread thr);
+
+ public static StackFrameData[] GetStackTrace(Thread thr) {
+    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
+    // suspended. The spec says that not being suspended is fine, but since we want consistent
+    // results we suspend the thread when running on the RI.
+ boolean suspend_thread =
+ !System.getProperty("java.vm.name").equals("Dalvik") &&
+ !thr.equals(Thread.currentThread()) &&
+ !Suspension.isSuspended(thr);
+ if (suspend_thread) {
+ Suspension.suspend(thr);
+ }
+ StackFrameData[] out = nativeGetStackTrace(thr);
+ if (suspend_thread) {
+ Suspension.resume(thr);
+ }
+ return out;
+ }
+}
+
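For illustration only (not part of this change): a small sketch of dumping another thread's frames with the helper above. StackTraceUsage is a hypothetical name; only APIs shown in this file are used.

import art.StackTrace;
import art.StackTrace.StackFrameData;

public class StackTraceUsage {
  public static void dump(Thread thr) {
    // GetStackTrace handles suspension itself when running on the RI, so the
    // caller can simply iterate over the returned frames.
    for (StackFrameData frame : StackTrace.GetStackTrace(thr)) {
      System.out.println(frame.depth + ": " + frame.method
          + " @ dex location " + frame.current_location);
    }
  }
}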
diff --git a/test/1956-pop-frame-jit-calling/src/art/Suspension.java b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
new file mode 100644
index 0000000..16e62cc
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
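For illustration only (not part of this change): a sketch of the suspend/act/resume pattern the helpers above support, mirroring the guard used by StackTrace.GetStackTrace. SuspensionUsage and withSuspended are hypothetical names.

import art.Suspension;

public class SuspensionUsage {
  public static void withSuspended(Thread thr, Runnable action) {
    // Only suspend if the target thread is not already suspended.
    boolean needSuspend = !Suspension.isSuspended(thr);
    if (needSuspend) {
      Suspension.suspend(thr);
    }
    try {
      action.run();
    } finally {
      if (needSuspend) {
        Suspension.resume(thr);
      }
    }
  }
}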
diff --git a/test/1956-pop-frame-jit-calling/src/art/Test1953.java b/test/1956-pop-frame-jit-calling/src/art/Test1953.java
new file mode 120000
index 0000000..f281434
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Test1953.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/Test1953.java
\ No newline at end of file
diff --git a/test/441-checker-inliner/src/Main.java b/test/441-checker-inliner/src/Main.java
index 3ccfce4..6c75962 100644
--- a/test/441-checker-inliner/src/Main.java
+++ b/test/441-checker-inliner/src/Main.java
@@ -135,11 +135,7 @@
}
}
- /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (before)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.returnAbs(int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.returnAbs(int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathAbsInt
/// CHECK-DAG: Return [<<Result>>]
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 5fc5464..eb81f3b 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -34,7 +34,7 @@
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index f867bdf..80abb3b 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -32,7 +32,7 @@
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 7eb3fe5..ddc86df 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -33,7 +33,7 @@
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 58ffe04..905d8e6 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -32,7 +32,7 @@
TestVisitor(Thread* thread, Context* context) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index e16fa69..b1bc51e 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -26,7 +26,7 @@
/// CHECK-START: void Main.invokeStaticInlined() builder (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
/// CHECK-DAG: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK-DAG: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK-DAG: InvokeStaticOrDirect [{{([ij]\d+,)?}}<<ClinitCheck>>]
/// CHECK-START: void Main.invokeStaticInlined() inliner (after)
/// CHECK-DAG: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
@@ -69,7 +69,7 @@
/// CHECK-START: void Main.invokeStaticNotInlined() builder (after)
/// CHECK: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
/// CHECK: <<ClinitCheck:l\d+>> ClinitCheck [<<LoadClass>>]
- /// CHECK: InvokeStaticOrDirect [{{[ij]\d+}},<<ClinitCheck>>]
+ /// CHECK: InvokeStaticOrDirect [{{([ij]\d+,)?}}<<ClinitCheck>>]
/// CHECK-START: void Main.invokeStaticNotInlined() inliner (after)
/// CHECK: <<LoadClass:l\d+>> LoadClass gen_clinit_check:false
diff --git a/test/510-checker-try-catch/smali/Runtime.smali b/test/510-checker-try-catch/smali/Runtime.smali
index 19b43a3..d080a0c 100644
--- a/test/510-checker-try-catch/smali/Runtime.smali
+++ b/test/510-checker-try-catch/smali/Runtime.smali
@@ -549,6 +549,76 @@
.end array-data
.end method
+
+## CHECK-START-{ARM,ARM64}: int Runtime.testIntAddressCatch(int, int[]) GVN$after_arch (after)
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Offset:i\d+>> IntConstant 12
+## CHECK-DAG: <<IndexParam:i\d+>> ParameterValue
+## CHECK-DAG: <<Array:l\d+>> ParameterValue
+
+## CHECK-DAG: <<NullCh1:l\d+>> NullCheck [<<Array>>]
+## CHECK-DAG: <<Length:i\d+>> ArrayLength
+## CHECK-DAG: <<BoundsCh1:i\d+>> BoundsCheck [<<IndexParam>>,<<Length>>]
+## CHECK-DAG: <<IntAddr1:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+## CHECK-DAG: ArrayGet [<<IntAddr1>>,<<BoundsCh1>>]
+## CHECK-DAG: TryBoundary
+
+## CHECK-DAG: <<Xplus1:i\d+>> Add [<<IndexParam>>,<<Const1>>]
+## CHECK-DAG: <<BoundsCh2:i\d+>> BoundsCheck [<<Xplus1>>,<<Length>>]
+## CHECK-DAG: ArrayGet [<<IntAddr1>>,<<BoundsCh2>>]
+## CHECK-DAG: TryBoundary
+
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Xplus1>>]
+## CHECK-DAG: <<Phiplus1:i\d+>> Add [<<Phi>>,<<Const1>>]
+## CHECK-DAG: <<BoundsCh3:i\d+>> BoundsCheck [<<Phiplus1>>,<<Length>>]
+## CHECK-DAG: <<IntAddr3:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+## CHECK-DAG: ArrayGet [<<IntAddr3>>,<<BoundsCh3>>]
+
+## CHECK-START-{ARM,ARM64}: int Runtime.testIntAddressCatch(int, int[]) GVN$after_arch (after)
+## CHECK: NullCheck
+## CHECK-NOT: NullCheck
+
+## CHECK-START-{ARM,ARM64}: int Runtime.testIntAddressCatch(int, int[]) GVN$after_arch (after)
+## CHECK: IntermediateAddress
+## CHECK: IntermediateAddress
+## CHECK-NOT: IntermediateAddress
+
+## CHECK-START-{ARM,ARM64}: int Runtime.testIntAddressCatch(int, int[]) GVN$after_arch (after)
+## CHECK: BoundsCheck
+## CHECK: BoundsCheck
+## CHECK: BoundsCheck
+## CHECK-NOT: BoundsCheck
+
+## CHECK-START-{ARM,ARM64}: int Runtime.testIntAddressCatch(int, int[]) GVN$after_arch (after)
+## CHECK: ArrayGet
+## CHECK: ArrayGet
+## CHECK: ArrayGet
+## CHECK-NOT: ArrayGet
+.method public static testIntAddressCatch(I[I)I
+ .registers 4
+ aget v0, p1, p0
+ add-int v1, v0, v0
+
+ :try_start
+ const/4 v0, 0x1
+ add-int p0, p0, v0
+ aget v0, p1, p0
+
+ :try_end
+  .catch Ljava/lang/ArithmeticException; {:try_start .. :try_end} :catch_block
+
+ :return
+ add-int v1, v1, v0
+ return v1
+
+ :catch_block
+ const/4 v0, 0x1
+ add-int p0, p0, v0
+ aget v0, p1, p0
+
+ goto :return
+.end method
+
.field public static intArray:[I
.field public static longArray:[J
.field public static floatArray:[F
diff --git a/test/510-checker-try-catch/src/Main.java b/test/510-checker-try-catch/src/Main.java
index d6dcd30..18658cd 100644
--- a/test/510-checker-try-catch/src/Main.java
+++ b/test/510-checker-try-catch/src/Main.java
@@ -37,6 +37,114 @@
public int expected;
}
+  // Test that the IntermediateAddress instruction is not kept live across a BoundsCheck which can
+  // throw into a catch block.
+ //
+ /// CHECK-START-{ARM,ARM64}: void Main.boundsCheckAndCatch(int, int[], int[]) GVN$after_arch (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<Offset:i\d+>> IntConstant 12
+ /// CHECK-DAG: <<IndexParam:i\d+>> ParameterValue
+ /// CHECK-DAG: <<ArrayA:l\d+>> ParameterValue
+ /// CHECK-DAG: <<ArrayB:l\d+>> ParameterValue
+ //
+
+ /// CHECK-DAG: <<NullCh1:l\d+>> NullCheck [<<ArrayA>>]
+ /// CHECK-DAG: <<LengthA:i\d+>> ArrayLength
+ /// CHECK-DAG: <<BoundsCh1:i\d+>> BoundsCheck [<<IndexParam>>,<<LengthA>>]
+ /// CHECK-DAG: <<IntAddr1:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr1>>,<<BoundsCh1>>,<<Const1>>]
+ /// CHECK-DAG: TryBoundary
+ //
+ /// CHECK-DAG: <<IntAddr2:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr2>>,<<BoundsCh1>>,<<Const2>>]
+ /// CHECK-DAG: <<NullChB:l\d+>> NullCheck [<<ArrayB>>]
+ /// CHECK-DAG: <<LengthB:i\d+>> ArrayLength
+ /// CHECK-DAG: <<BoundsChB:i\d+>> BoundsCheck [<<Const0>>,<<LengthB>>]
+ /// CHECK-DAG: <<GetB:i\d+>> ArrayGet [<<NullChB>>,<<BoundsChB>>]
+ /// CHECK-DAG: <<ZeroCheck:i\d+>> DivZeroCheck [<<IndexParam>>]
+ /// CHECK-DAG: <<Div:i\d+>> Div [<<GetB>>,<<ZeroCheck>>]
+ /// CHECK-DAG: <<Xplus1:i\d+>> Add [<<IndexParam>>,<<Const1>>]
+ /// CHECK-DAG: <<BoundsCh2:i\d+>> BoundsCheck [<<Xplus1>>,<<LengthA>>]
+ /// CHECK-DAG: <<IntAddr3:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr3>>,<<BoundsCh2>>,<<Div>>]
+ /// CHECK-DAG: TryBoundary
+ //
+ /// CHECK-DAG: ClearException
+ /// CHECK-DAG: <<IntAddr4:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr4>>,<<BoundsCh1>>,<<Const1>>]
+ //
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: IntermediateAddress
+
+ /// CHECK-START-{ARM,ARM64}: void Main.boundsCheckAndCatch(int, int[], int[]) GVN$after_arch (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2
+ /// CHECK-DAG: <<Offset:i\d+>> IntConstant 12
+ /// CHECK-DAG: <<IndexParam:i\d+>> ParameterValue
+ /// CHECK-DAG: <<ArrayA:l\d+>> ParameterValue
+ /// CHECK-DAG: <<ArrayB:l\d+>> ParameterValue
+ //
+ /// CHECK-DAG: <<NullCh1:l\d+>> NullCheck [<<ArrayA>>]
+ /// CHECK-DAG: <<LengthA:i\d+>> ArrayLength
+ /// CHECK-DAG: <<BoundsCh1:i\d+>> BoundsCheck [<<IndexParam>>,<<LengthA>>]
+ /// CHECK-DAG: <<IntAddr1:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr1>>,<<BoundsCh1>>,<<Const1>>]
+ /// CHECK-DAG: TryBoundary
+ //
+ /// CHECK-DAG: ArraySet [<<IntAddr1>>,<<BoundsCh1>>,<<Const2>>]
+ /// CHECK-DAG: <<NullChB:l\d+>> NullCheck [<<ArrayB>>]
+ /// CHECK-DAG: <<LengthB:i\d+>> ArrayLength
+ /// CHECK-DAG: <<BoundsChB:i\d+>> BoundsCheck [<<Const0>>,<<LengthB>>]
+ /// CHECK-DAG: <<GetB:i\d+>> ArrayGet [<<NullChB>>,<<BoundsChB>>]
+ /// CHECK-DAG: <<ZeroCheck:i\d+>> DivZeroCheck [<<IndexParam>>]
+ /// CHECK-DAG: <<Div:i\d+>> Div [<<GetB>>,<<ZeroCheck>>]
+ /// CHECK-DAG: <<Xplus1:i\d+>> Add [<<IndexParam>>,<<Const1>>]
+ /// CHECK-DAG: <<BoundsCh2:i\d+>> BoundsCheck [<<Xplus1>>,<<LengthA>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr1>>,<<BoundsCh2>>,<<Div>>]
+ /// CHECK-DAG: TryBoundary
+ //
+ /// CHECK-DAG: ClearException
+ /// CHECK-DAG: <<IntAddr4:i\d+>> IntermediateAddress [<<NullCh1>>,<<Offset>>]
+ /// CHECK-DAG: ArraySet [<<IntAddr4>>,<<BoundsCh1>>,<<Const1>>]
+ //
+ /// CHECK-NOT: NullCheck
+ /// CHECK-NOT: IntermediateAddress
+
+ // Make sure that BoundsCheck, DivZeroCheck and NullCheck don't stop IntermediateAddress sharing.
+ public static void boundsCheckAndCatch(int x, int[] a, int[] b) {
+ a[x] = 1;
+ try {
+ a[x] = 2;
+ a[x + 1] = b[0] / x;
+ } catch (Exception e) {
+ a[x] = 1;
+ }
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public final static int ARRAY_SIZE = 128;
+
+ public static void testBoundsCheckAndCatch() {
+ int[] a = new int[ARRAY_SIZE];
+ int[] b = new int[ARRAY_SIZE];
+
+ int index = ARRAY_SIZE - 2;
+ boundsCheckAndCatch(index, a, b);
+ expectEquals(2, a[index]);
+
+ index = ARRAY_SIZE - 1;
+ boundsCheckAndCatch(index, a, b);
+ expectEquals(1, a[index]);
+ }
+
public static void testMethod(String method) throws Exception {
Class<?> c = Class.forName("Runtime");
Method m = c.getMethod(method, boolean.class, boolean.class);
@@ -52,6 +160,14 @@
}
}
+ public static void testIntAddressCatch() throws Exception {
+ int[] a = new int[3];
+
+ Class<?> c = Class.forName("Runtime");
+ Method m = c.getMethod("testIntAddressCatch", int.class, Class.forName("[I"));
+ m.invoke(null, 0, a);
+ }
+
public static void main(String[] args) throws Exception {
testMethod("testUseAfterCatch_int");
testMethod("testUseAfterCatch_long");
@@ -64,5 +180,8 @@
testMethod("testCatchPhi_double");
testMethod("testCatchPhi_singleSlot");
testMethod("testCatchPhi_doubleSlot");
+
+ testBoundsCheckAndCatch();
+ testIntAddressCatch();
}
}
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index 935b378..f83c924 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -552,6 +552,28 @@
return (int)s;
}
+ //
+  // Check that IntermediateAddress can be shared across BoundsCheck, DivZeroCheck and NullCheck -
+  // instructions which have fatal slow paths.
+ //
+ /// CHECK-START-{ARM,ARM64}: void Main.checkGVNForFatalChecks(int, int, char[], int[]) GVN$after_arch (before)
+ /// CHECK: IntermediateAddress
+ /// CHECK: IntermediateAddress
+ //
+ /// CHECK-NOT: IntermediateAddress
+
+ /// CHECK-START-{ARM,ARM64}: void Main.checkGVNForFatalChecks(int, int, char[], int[]) GVN$after_arch (after)
+ /// CHECK: IntermediateAddress
+ //
+ /// CHECK-NOT: IntermediateAddress
+ public final static void checkGVNForFatalChecks(int begin, int end, char[] buf1, int[] buf2) {
+ buf1[begin] = 'a';
+ buf2[0] = begin / end;
+ buf1[end] = 'n';
+ }
+
+ public final static int ARRAY_SIZE = 128;
+
public static void main(String[] args) {
int[] array = {123, 456, 789};
@@ -575,5 +597,10 @@
assertIntEquals(2097152, canMergeAfterBCE2());
assertIntEquals(18, checkLongFloatDouble());
+
+ char[] c1 = new char[ARRAY_SIZE];
+ int[] i1 = new int[ARRAY_SIZE];
+ checkGVNForFatalChecks(1, 2, c1, i1);
+ assertIntEquals('n', c1[2]);
}
}
diff --git a/test/530-checker-peel-unroll/smali/PeelUnroll.smali b/test/530-checker-peel-unroll/smali/PeelUnroll.smali
new file mode 100644
index 0000000..6e09e92
--- /dev/null
+++ b/test/530-checker-peel-unroll/smali/PeelUnroll.smali
@@ -0,0 +1,232 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LPeelUnroll;
+
+.super Ljava/lang/Object;
+
+## CHECK-START: void PeelUnroll.unrollingWhile(int[]) loop_optimization (before)
+## CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+## CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
+## CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
+## CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+## CHECK-DAG: Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
+## CHECK-NOT: ArraySet loop:<<Loop>> outer_loop:none
+
+## CHECK-START: void PeelUnroll.unrollingWhile(int[]) loop_optimization (after)
+## CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+## CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
+## CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
+## CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<PhiSM:i\d+>> Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-DAG: <<AddIA:i\d+>> Add [<<AddI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<CheckA:z\d+>> GreaterThanOrEqual [<<AddI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<IfA:v\d+>> If [<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<RemA:i\d+>> Rem [<<AddIA>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NEA:z\d+>> NotEqual [<<RemA>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NEA>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddSA:i\d+>> Add [<<PhiSM>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: Phi [<<AddSA>>,<<PhiSM>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
+## CHECK-NOT: ArraySet loop:<<Loop>> outer_loop:none
+.method public static final unrollingWhile([I)V
+ .registers 5
+ .param p0, "a" # [I
+
+ .line 167
+ const/4 v0, 0x0
+
+ .line 168
+ .local v0, "i":I
+ const/16 v1, 0x80
+
+ .line 169
+ .local v1, "s":I
+ :goto_3
+ add-int/lit8 v2, v0, 0x1
+
+ .end local v0 # "i":I
+ .local v2, "i":I
+ const/16 v3, 0xffe
+
+ if-ge v0, v3, :cond_14
+
+ .line 170
+ rem-int/lit8 v0, v2, 0x2
+
+ if-nez v0, :cond_12
+
+ .line 171
+ add-int/lit8 v0, v1, 0x1
+
+ .end local v1 # "s":I
+ .local v0, "s":I
+ aput v1, p0, v2
+
+ .line 169
+ move v1, v0
+
+ .end local v2 # "i":I
+ .local v0, "i":I
+ .restart local v1 # "s":I
+ :cond_12
+ move v0, v2
+
+ goto :goto_3
+
+ .line 174
+ .end local v0 # "i":I
+ .restart local v2 # "i":I
+ :cond_14
+ return-void
+.end method
+
+
+## CHECK-START: int PeelUnroll.unrollingWhileLiveOuts(int[]) loop_optimization (before)
+## CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+## CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
+## CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
+## CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+## CHECK-DAG: Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-NOT: ArrayGet
+## CHECK-NOT: ArraySet
+
+## CHECK-START: int PeelUnroll.unrollingWhileLiveOuts(int[]) loop_optimization (after)
+## CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+## CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+## CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
+## CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
+## CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+## CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<PhiSM:i\d+>> Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-DAG: <<AddIA:i\d+>> Add [<<AddI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<CheckA:z\d+>> GreaterThanOrEqual [<<AddI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<IfA:v\d+>> If [<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<RemA:i\d+>> Rem [<<AddIA>>,<<Const2>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<NEA:z\d+>> NotEqual [<<RemA>>,<<Const0>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: If [<<NEA>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: <<AddSA:i\d+>> Add [<<PhiSM>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>] loop:<<Loop>> outer_loop:none
+## CHECK-DAG: Phi [<<AddSA>>,<<PhiSM>>] loop:<<Loop>> outer_loop:none
+
+## CHECK-DAG: <<RetPhi:i\d+>> Phi [<<PhiS>>,<<PhiSM>>] loop:none
+## CHECK-DAG: Return [<<RetPhi>>] loop:none
+
+## CHECK-NOT: ArrayGet
+## CHECK-NOT: ArraySet
+.method public static final unrollingWhileLiveOuts([I)I
+ .registers 5
+ .param p0, "a" # [I
+
+ .line 598
+ const/4 v0, 0x0
+
+ .line 599
+ .local v0, "i":I
+ const/16 v1, 0x80
+
+ .line 600
+ .local v1, "s":I
+ :goto_3
+ add-int/lit8 v2, v0, 0x1
+
+ .end local v0 # "i":I
+ .local v2, "i":I
+ const/16 v3, 0xffe
+
+ if-ge v0, v3, :cond_14
+
+ .line 601
+ rem-int/lit8 v0, v2, 0x2
+
+ if-nez v0, :cond_12
+
+ .line 602
+ add-int/lit8 v0, v1, 0x1
+
+ .end local v1 # "s":I
+ .local v0, "s":I
+ aput v1, p0, v2
+
+ .line 600
+ move v1, v0
+
+ .end local v2 # "i":I
+ .local v0, "i":I
+ .restart local v1 # "s":I
+ :cond_12
+ move v0, v2
+
+ goto :goto_3
+
+ .line 605
+ .end local v0 # "i":I
+ .restart local v2 # "i":I
+ :cond_14
+ return v1
+.end method
+
diff --git a/test/530-checker-peel-unroll/src/Main.java b/test/530-checker-peel-unroll/src/Main.java
index 4d81440..aee32b7 100644
--- a/test/530-checker-peel-unroll/src/Main.java
+++ b/test/530-checker-peel-unroll/src/Main.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+import java.lang.reflect.Method;
+
//
// Test loop optimizations, in particular scalar loop peeling and unrolling.
public class Main {
@@ -110,69 +112,6 @@
}
}
- /// CHECK-START: void Main.unrollingWhile(int[]) loop_optimization (before)
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
- /// CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
- /// CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-NOT: ArraySet loop:<<Loop>> outer_loop:none
-
- /// CHECK-START: void Main.unrollingWhile(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
- /// CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
- /// CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<PhiSM:i\d+>> Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-DAG: <<AddIA:i\d+>> Add [<<AddI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<CheckA:z\d+>> GreaterThanOrEqual [<<AddI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<IfA:v\d+>> If [<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<RemA:i\d+>> Rem [<<AddIA>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NEA:z\d+>> NotEqual [<<RemA>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NEA>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddSA:i\d+>> Add [<<PhiSM>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Phi [<<AddSA>>,<<PhiSM>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-NOT: ArrayGet loop:<<Loop>> outer_loop:none
- /// CHECK-NOT: ArraySet loop:<<Loop>> outer_loop:none
- private static final void unrollingWhile(int[] a) {
- int i = 0;
- int s = 128;
- while (i++ < LENGTH - 2) {
- if (i % 2 == 0) {
- a[i] = s++;
- }
- }
- }
-
// Simple check that loop unrolling has happened.
//
/// CHECK-START: void Main.unrollingSwitch(int[]) loop_optimization (before)
@@ -538,73 +477,6 @@
return 1 / (s + t);
}
- /// CHECK-START: int Main.unrollingWhileLiveOuts(int[]) loop_optimization (before)
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
- /// CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
- /// CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-NOT: ArrayGet
- /// CHECK-NOT: ArraySet
-
- /// CHECK-START: int Main.unrollingWhileLiveOuts(int[]) loop_optimization (after)
- /// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
- /// CHECK-DAG: <<Const128:i\d+>> IntConstant 128 loop:none
- /// CHECK-DAG: <<Limit:i\d+>> IntConstant 4094 loop:none
- /// CHECK-DAG: <<PhiI:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<PhiS:i\d+>> Phi [<<Const128>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddI:i\d+>> Add [<<PhiI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Check:z\d+>> GreaterThanOrEqual [<<PhiI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<If:v\d+>> If [<<Check>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Rem:i\d+>> Rem [<<AddI>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NE:z\d+>> NotEqual [<<Rem>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NE>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddS:i\d+>> Add [<<PhiS>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiS>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<PhiSM:i\d+>> Phi [<<PhiS>>,<<AddS>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-DAG: <<AddIA:i\d+>> Add [<<AddI>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<CheckA:z\d+>> GreaterThanOrEqual [<<AddI>>,<<Limit>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<IfA:v\d+>> If [<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<RemA:i\d+>> Rem [<<AddIA>>,<<Const2>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<NEA:z\d+>> NotEqual [<<RemA>>,<<Const0>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: If [<<NEA>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<AddSA:i\d+>> Add [<<PhiSM>>,<<Const1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<PhiSM>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Phi [<<AddSA>>,<<PhiSM>>] loop:<<Loop>> outer_loop:none
- //
- /// CHECK-DAG: <<RetPhi:i\d+>> Phi [<<PhiS>>,<<PhiSM>>] loop:none
- /// CHECK-DAG: Return [<<RetPhi>>] loop:none
- //
- /// CHECK-NOT: ArrayGet
- /// CHECK-NOT: ArraySet
- private static final int unrollingWhileLiveOuts(int[] a) {
- int i = 0;
- int s = 128;
- while (i++ < LENGTH - 2) {
- if (i % 2 == 0) {
- a[i] = s++;
- }
- }
- return s;
- }
-
/// CHECK-START: int Main.unrollingLiveOutsNested(int[]) loop_optimization (before)
/// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
/// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
@@ -1113,7 +985,7 @@
}
}
- public void verifyUnrolling() {
+ public void verifyUnrolling() throws Exception {
initIntArray(a);
initIntArray(b);
@@ -1132,7 +1004,12 @@
unrollingTypeConversion(a, doubleArray);
unrollingCheckCast(a, new SubMain());
- unrollingWhile(a);
+ // Call unrollingWhile(a);
+ Class<?> c = Class.forName("PeelUnroll");
+ Method m = c.getMethod("unrollingWhile", Class.forName("[I"));
+ Object[] arguments = { a };
+ m.invoke(null, arguments);
+
unrollingLoadStoreElimination(a);
unrollingSwitch(a);
unrollingSwapElements(a);
@@ -1152,7 +1029,7 @@
expectEquals(expected, found);
}
- public void verifyPeeling() {
+ public void verifyPeeling() throws Exception {
expectEquals(1, peelingHoistOneControl(0)); // anything else loops
expectEquals(1, peelingHoistOneControl(0, 0));
expectEquals(1, peelingHoistOneControl(0, 1));
@@ -1176,7 +1053,13 @@
peelingBreakFromNest(a, true);
unrollingSimpleLiveOuts(a);
- unrollingWhileLiveOuts(a);
+
+ // Call unrollingWhileLiveOuts(a);
+ Class<?> c = Class.forName("PeelUnroll");
+ Method m = c.getMethod("unrollingWhileLiveOuts", Class.forName("[I"));
+ Object[] arguments = { a };
+ m.invoke(null, arguments);
+
unrollingLiveOutsNested(a);
int expected = 51565978;
@@ -1188,7 +1071,7 @@
expectEquals(expected, found);
}
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
Main obj = new Main();
obj.verifyUnrolling();
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index ce5602f..165f5bf 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -34,7 +34,7 @@
found_(false),
soa_(soa) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 746887f..0bceffd 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -41,10 +41,7 @@
return x;
}
- /// CHECK-START: int Main.testSimple(int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
-
- /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testSimple(int) sharpening (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testSimple(int) builder (after)
/// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (before)
@@ -59,11 +56,7 @@
return $noinline$foo(x);
}
- /// CHECK-START: int Main.testDiamond(boolean, int) sharpening (before)
- /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
- /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
-
- /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testDiamond(boolean, int) sharpening (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testDiamond(boolean, int) builder (after)
/// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
@@ -194,18 +187,12 @@
}
/// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) builder (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
-
- /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) sharpening (after)
/// CHECK: InvokeStaticOrDirect method_load_kind:BootImageRelRo
public static String $noinline$toHexString(int value) {
return Integer.toString(value, 16);
}
/// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) builder (after)
- /// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
-
- /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) sharpening (after)
/// CHECK: InvokeStaticOrDirect method_load_kind:BssEntry
/// CHECK-START-X86: java.lang.String Main.$noinline$toHexStringIndirect(int) pc_relative_fixups_x86 (before)
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 0fe39ee..4721eca 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -307,7 +307,6 @@
.end method
## CHECK-START: java.lang.String TestCase.loopAndStringInitAndPhi(byte[], boolean) register (after)
-## CHECK: NewInstance
## CHECK-NOT: NewInstance
## CHECK-DAG: <<Invoke1:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
## CHECK-DAG: <<Invoke2:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
@@ -337,3 +336,140 @@
return-object v0
.end method
+
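+# Test with two uninitialized strings, one initialized on each branch, whose
+# values merge into a phi at the common exit.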
+.method public static loopAndTwoStringInitAndPhi([BZZ)Ljava/lang/String;
+ .registers 6
+
+ new-instance v0, Ljava/lang/String;
+ new-instance v2, Ljava/lang/String;
+
+ if-nez p2, :allocate_other
+
+ # Loop
+ :loop_header
+ if-eqz p1, :loop_exit
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ goto :exit
+
+ :allocate_other
+
+ # Loop
+ :loop_header2
+ if-eqz p1, :loop_exit2
+ goto :loop_header2
+
+ :loop_exit2
+ const-string v1, "UTF8"
+ invoke-direct {v2, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ move-object v0, v2
+
+ :exit
+ return-object v0
+
+.end method
+
+# Regression test for a new string flowing into a catch phi.
+.method public static stringAndCatch([BZ)Ljava/lang/Object;
+ .registers 4
+
+ const v0, 0x0
+
+ :try_start_a
+ new-instance v0, Ljava/lang/String;
+
+ # Loop
+ :loop_header
+ if-eqz p1, :loop_exit
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ goto :exit
+ :try_end_a
+ .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+ :catch_a
+ # Initially, we create a catch phi with the potentially uninitialized string, which used to
+ # trip the compiler. However, using that catch phi is an error caught by the verifier, so
+ # having the phi is benign.
+ const v0, 0x0
+
+ :exit
+ return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor.
+.method public static stringAndCatch2([BZ)Ljava/lang/Object;
+ .registers 4
+
+ const v0, 0x0
+ new-instance v0, Ljava/lang/String;
+
+ :try_start_a
+ const-string v1, "UTF8"
+ :try_end_a
+ .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+ :catch_a
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ return-object v0
+
+.end method
+
+# Same test as above, but with a catch phi being used by the string constructor and
+# a null test.
+.method public static stringAndCatch3([BZ)Ljava/lang/Object;
+ .registers 4
+
+ const v0, 0x0
+ new-instance v0, Ljava/lang/String;
+
+ :try_start_a
+ const-string v1, "UTF8"
+ :try_end_a
+ .catch Ljava/lang/Exception; {:try_start_a .. :try_end_a} :catch_a
+
+ :catch_a
+ if-eqz v0, :unexpected
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ goto :exit
+ :unexpected
+ const-string v0, "UTF8"
+ :exit
+ return-object v0
+
+.end method
+
+# Regression test for a pattern that used to trip the compiler: a new-instance
+# string flows through a loop and merges with another string in a phi before use.
+.method public static stringAndPhi([BZ)Ljava/lang/Object;
+ .registers 4
+
+ new-instance v0, Ljava/lang/String;
+ const-string v1, "UTF8"
+
+ :loop_header
+ if-nez p1, :unused
+ if-eqz p1, :invoke
+ goto :loop_header
+
+ :invoke
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ goto :exit
+
+ :unused
+ const-string v0, "UTF8"
+ if-nez p1, :exit
+ goto :unused
+
+ :exit
+ return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index df9e9dc..77a108f 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -133,6 +133,29 @@
result = (String) m.invoke(null, new Object[] { testData, false });
assertEqual(testString, result);
}
+ {
+ Method m =
+ c.getMethod("loopAndTwoStringInitAndPhi", byte[].class, boolean.class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, false, false });
+ assertEqual(testString, result);
+ result = (String) m.invoke(null, new Object[] { testData, false, true });
+ assertEqual(testString, result);
+ }
+ {
+ Method m = c.getMethod("stringAndCatch", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
+ {
+ Method m = c.getMethod("stringAndCatch2", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
+ {
+ Method m = c.getMethod("stringAndCatch3", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
}
public static boolean doThrow = false;
diff --git a/test/564-checker-bitcount/src/Main.java b/test/564-checker-bitcount/src/Main.java
index aad9689..e022d9d 100644
--- a/test/564-checker-bitcount/src/Main.java
+++ b/test/564-checker-bitcount/src/Main.java
@@ -21,7 +21,7 @@
// CHECK-DAG: popcnt
- /// CHECK-START: int Main.$noinline$BitCountBoolean(boolean) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountBoolean(boolean) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountBoolean(boolean x) {
@@ -29,7 +29,7 @@
return Integer.bitCount(x ? 1 : 0);
}
- /// CHECK-START: int Main.$noinline$BitCountByte(byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountByte(byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountByte(byte x) {
@@ -37,7 +37,7 @@
return Integer.bitCount(x);
}
- /// CHECK-START: int Main.$noinline$BitCountShort(short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountShort(short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountShort(short x) {
@@ -45,7 +45,7 @@
return Integer.bitCount(x);
}
- /// CHECK-START: int Main.$noinline$BitCountChar(char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountChar(char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountChar(char x) {
@@ -53,7 +53,7 @@
return Integer.bitCount(x);
}
- /// CHECK-START: int Main.$noinline$BitCountInt(int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountInt(int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountInt(int x) {
@@ -61,7 +61,7 @@
return Integer.bitCount(x);
}
- /// CHECK-START: int Main.$noinline$BitCountLong(long) intrinsics_recognition (after)
+ /// CHECK-START: int Main.$noinline$BitCountLong(long) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongBitCount
/// CHECK-DAG: Return [<<Result>>]
private static int $noinline$BitCountLong(long x) {
diff --git a/test/565-checker-rotate/smali/Main2.smali b/test/565-checker-rotate/smali/Main2.smali
index ca5027e..768c9d0 100644
--- a/test/565-checker-rotate/smali/Main2.smali
+++ b/test/565-checker-rotate/smali/Main2.smali
@@ -15,14 +15,13 @@
.class public LMain2;
.super Ljava/lang/Object;
-## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) intrinsics_recognition (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) builder (after)
## CHECK: <<ArgVal:z\d+>> ParameterValue
## CHECK: <<ArgDist:i\d+>> ParameterValue
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
-## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
## CHECK-DAG: Return [<<Result>>]
## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
@@ -91,15 +90,14 @@
goto :goto_3
.end method
-## CHECK-START: int Main2.rotateRightBoolean(boolean, int) intrinsics_recognition (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) builder (after)
## CHECK: <<ArgVal:z\d+>> ParameterValue
## CHECK: <<ArgDist:i\d+>> ParameterValue
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
-## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
-## CHECK-DAG: Return [<<Result>>]
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
+## CHECK-DAG: Return [<<Result>>]
## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier (after)
## CHECK: <<ArgVal:z\d+>> ParameterValue
diff --git a/test/565-checker-rotate/src-art/Main.java b/test/565-checker-rotate/src-art/Main.java
index b9e1315..867feb8 100644
--- a/test/565-checker-rotate/src-art/Main.java
+++ b/test/565-checker-rotate/src-art/Main.java
@@ -20,11 +20,10 @@
private static Class main2;
- /// CHECK-START: int Main.rotateLeftByte(byte, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateLeftByte(byte, int) builder (after)
/// CHECK: <<ArgVal:b\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
@@ -41,11 +40,10 @@
return Integer.rotateLeft(value, distance);
}
- /// CHECK-START: int Main.rotateLeftShort(short, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateLeftShort(short, int) builder (after)
/// CHECK: <<ArgVal:s\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
@@ -62,11 +60,10 @@
return Integer.rotateLeft(value, distance);
}
- /// CHECK-START: int Main.rotateLeftChar(char, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateLeftChar(char, int) builder (after)
/// CHECK: <<ArgVal:c\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
@@ -83,11 +80,10 @@
return Integer.rotateLeft(value, distance);
}
- /// CHECK-START: int Main.rotateLeftInt(int, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateLeftInt(int, int) builder (after)
/// CHECK: <<ArgVal:i\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
@@ -104,11 +100,10 @@
return Integer.rotateLeft(value, distance);
}
- /// CHECK-START: long Main.rotateLeftLong(long, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: long Main.rotateLeftLong(long, int) builder (after)
/// CHECK: <<ArgVal:j\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateLeft
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:LongRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
@@ -125,11 +120,10 @@
return Long.rotateLeft(value, distance);
}
- /// CHECK-START: int Main.rotateRightByte(byte, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateRightByte(byte, int) builder (after)
/// CHECK: <<ArgVal:b\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
@@ -145,11 +139,10 @@
return Integer.rotateRight(value, distance);
}
- /// CHECK-START: int Main.rotateRightShort(short, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateRightShort(short, int) builder (after)
/// CHECK: <<ArgVal:s\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
@@ -165,11 +158,10 @@
return Integer.rotateRight(value, distance);
}
- /// CHECK-START: int Main.rotateRightChar(char, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateRightChar(char, int) builder (after)
/// CHECK: <<ArgVal:c\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
@@ -185,11 +177,10 @@
return Integer.rotateRight(value, distance);
}
- /// CHECK-START: int Main.rotateRightInt(int, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateRightInt(int, int) builder (after)
/// CHECK: <<ArgVal:i\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
@@ -205,11 +196,10 @@
return Integer.rotateRight(value, distance);
}
- /// CHECK-START: long Main.rotateRightLong(long, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: long Main.rotateRightLong(long, int) builder (after)
/// CHECK: <<ArgVal:j\d+>> ParameterValue
/// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateRight
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:LongRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
@@ -226,11 +216,10 @@
}
- /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) builder (after)
/// CHECK: <<ArgVal:i\d+>> ParameterValue
/// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateLeft
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
@@ -247,11 +236,10 @@
return Integer.rotateLeft(value, distance);
}
- /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) builder (after)
/// CHECK: <<ArgVal:i\d+>> ParameterValue
/// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>{{(,[ij]\d+)?}}] intrinsic:IntegerRotateRight
/// CHECK-DAG: Return [<<Result>>]
/// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
diff --git a/test/566-checker-signum/smali/Main2.smali b/test/566-checker-signum/smali/Main2.smali
index d99ad86..767bed2 100644
--- a/test/566-checker-signum/smali/Main2.smali
+++ b/test/566-checker-signum/smali/Main2.smali
@@ -15,12 +15,11 @@
.class public LMain2;
.super Ljava/lang/Object;
-## CHECK-START: int Main2.signBoolean(boolean) intrinsics_recognition (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Main2.signBoolean(boolean) builder (after)
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
-## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>,<<Method>>] intrinsic:IntegerSignum
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>{{(,[ij]\d+)?}}] intrinsic:IntegerSignum
## CHECK-DAG: Return [<<Result>>]
## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier (after)
diff --git a/test/566-checker-signum/src-art/Main.java b/test/566-checker-signum/src-art/Main.java
index f1e1e1b..ea01785 100644
--- a/test/566-checker-signum/src-art/Main.java
+++ b/test/566-checker-signum/src-art/Main.java
@@ -18,7 +18,7 @@
public class Main {
- /// CHECK-START: int Main.signByte(byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.signByte(byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
/// CHECK-DAG: Return [<<Result>>]
@@ -33,7 +33,7 @@
return Integer.signum(x);
}
- /// CHECK-START: int Main.signShort(short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.signShort(short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
/// CHECK-DAG: Return [<<Result>>]
@@ -48,7 +48,7 @@
return Integer.signum(x);
}
- /// CHECK-START: int Main.signChar(char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.signChar(char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
/// CHECK-DAG: Return [<<Result>>]
@@ -63,7 +63,7 @@
return Integer.signum(x);
}
- /// CHECK-START: int Main.signInt(int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.signInt(int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
/// CHECK-DAG: Return [<<Result>>]
@@ -78,7 +78,7 @@
return Integer.signum(x);
}
- /// CHECK-START: int Main.signLong(long) intrinsics_recognition (after)
+ /// CHECK-START: int Main.signLong(long) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongSignum
/// CHECK-DAG: Return [<<Result>>]
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 7c1507f..17ccd9a 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -41,11 +41,12 @@
header = OatQuickMethodHeader::FromEntryPoint(pc);
break;
} else {
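+ // Transition to a suspended state while sleeping so this thread does not
+ // block suspend-all requests (e.g., from the GC) during the wait.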
+ ScopedThreadSuspension sts(soa.Self(), kSuspended);
// Sleep to yield to the compiler thread.
usleep(1000);
- // Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(method, soa.Self(), /* osr */ false);
}
CodeInfo info(header);
diff --git a/test/567-checker-compare/smali/Smali.smali b/test/567-checker-compare/smali/Smali.smali
index 8fc39f1..fb6d241 100644
--- a/test/567-checker-compare/smali/Smali.smali
+++ b/test/567-checker-compare/smali/Smali.smali
@@ -15,13 +15,12 @@
.class public LSmali;
.super Ljava/lang/Object;
-## CHECK-START: int Smali.compareBooleans(boolean, boolean) intrinsics_recognition (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK-START: int Smali.compareBooleans(boolean, boolean) builder (after)
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<PhiX:i\d+>> Phi [<<One>>,<<Zero>>]
## CHECK-DAG: <<PhiY:i\d+>> Phi [<<One>>,<<Zero>>]
-## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<PhiX>>,<<PhiY>>,<<Method>>] intrinsic:IntegerCompare
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<PhiX>>,<<PhiY>>{{(,[ij]\d+)?}}] intrinsic:IntegerCompare
## CHECK-DAG: Return [<<Result>>]
## CHECK-START: int Smali.compareBooleans(boolean, boolean) instruction_simplifier (after)
diff --git a/test/567-checker-compare/src/Main.java b/test/567-checker-compare/src/Main.java
index abfaf9f..a3ff005 100644
--- a/test/567-checker-compare/src/Main.java
+++ b/test/567-checker-compare/src/Main.java
@@ -20,11 +20,10 @@
public static boolean doThrow = false;
- /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) intrinsics_recognition (after)
+ /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) builder (after)
/// CHECK-DAG: <<ArgX:i\d+>> ParameterValue
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
/// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Cmp:i\d+>> InvokeStaticOrDirect [<<ArgX>>,<<Zero>>,<<Method>>] intrinsic:IntegerCompare
+ /// CHECK-DAG: <<Cmp:i\d+>> InvokeStaticOrDirect [<<ArgX>>,<<Zero>>{{(,[ij]\d+)?}}] intrinsic:IntegerCompare
/// CHECK-DAG: GreaterThanOrEqual [<<Cmp>>,<<Zero>>]
/// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) instruction_simplifier (after)
@@ -66,7 +65,7 @@
return (Integer) m.invoke(null, x, y);
}
- /// CHECK-START: int Main.compareBytes(byte, byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareBytes(byte, byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -81,7 +80,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareShorts(short, short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareShorts(short, short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -96,7 +95,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareChars(char, char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareChars(char, char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -111,7 +110,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareInts(int, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareInts(int, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -126,7 +125,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareLongs(long, long) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareLongs(long, long) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -142,7 +141,7 @@
}
- /// CHECK-START: int Main.compareByteShort(byte, short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareByteShort(byte, short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -157,7 +156,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareByteChar(byte, char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareByteChar(byte, char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -172,7 +171,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareByteInt(byte, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareByteInt(byte, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -188,7 +187,7 @@
}
- /// CHECK-START: int Main.compareShortByte(short, byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareShortByte(short, byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -203,7 +202,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareShortChar(short, char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareShortChar(short, char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -218,7 +217,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareShortInt(short, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareShortInt(short, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -234,7 +233,7 @@
}
- /// CHECK-START: int Main.compareCharByte(char, byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareCharByte(char, byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -249,7 +248,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareCharShort(char, short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareCharShort(char, short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -264,7 +263,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareCharInt(char, int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareCharInt(char, int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -280,7 +279,7 @@
}
- /// CHECK-START: int Main.compareIntByte(int, byte) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareIntByte(int, byte) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -295,7 +294,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareIntShort(int, short) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareIntShort(int, short) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
@@ -310,7 +309,7 @@
return Integer.compare(x, y);
}
- /// CHECK-START: int Main.compareIntChar(int, char) intrinsics_recognition (after)
+ /// CHECK-START: int Main.compareIntChar(int, char) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerCompare
/// CHECK-DAG: Return [<<Result>>]
diff --git a/test/568-checker-onebit/src/Main.java b/test/568-checker-onebit/src/Main.java
index 6ce4ffb..e4d3e88 100644
--- a/test/568-checker-onebit/src/Main.java
+++ b/test/568-checker-onebit/src/Main.java
@@ -16,28 +16,28 @@
public class Main {
- /// CHECK-START: int Main.hi32(int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.hi32(int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerHighestOneBit
/// CHECK-DAG: Return [<<Result>>]
private static int hi32(int x) {
return Integer.highestOneBit(x);
}
- /// CHECK-START: int Main.lo32(int) intrinsics_recognition (after)
+ /// CHECK-START: int Main.lo32(int) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerLowestOneBit
/// CHECK-DAG: Return [<<Result>>]
private static int lo32(int x) {
return Integer.lowestOneBit(x);
}
- /// CHECK-START: long Main.hi64(long) intrinsics_recognition (after)
+ /// CHECK-START: long Main.hi64(long) builder (after)
/// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:LongHighestOneBit
/// CHECK-DAG: Return [<<Result>>]
private static long hi64(long x) {
return Long.highestOneBit(x);
}
- /// CHECK-START: long Main.lo64(long) intrinsics_recognition (after)
+ /// CHECK-START: long Main.lo64(long) builder (after)
/// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:LongLowestOneBit
/// CHECK-DAG: Return [<<Result>>]
private static long lo64(long x) {
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index faec3c3..7b88842 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -35,7 +35,7 @@
in_osr_method_(false),
in_interpreter_(false) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -95,7 +95,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -129,7 +129,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/580-checker-round/src/Main.java b/test/580-checker-round/src/Main.java
index 83bc55c..a6752b5 100644
--- a/test/580-checker-round/src/Main.java
+++ b/test/580-checker-round/src/Main.java
@@ -16,14 +16,14 @@
public class Main {
- /// CHECK-START: int Main.round32(float) intrinsics_recognition (after)
+ /// CHECK-START: int Main.round32(float) builder (after)
/// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathRoundFloat
/// CHECK-DAG: Return [<<Result>>]
private static int round32(float f) {
return Math.round(f);
}
- /// CHECK-START: long Main.round64(double) intrinsics_recognition (after)
+ /// CHECK-START: long Main.round64(double) builder (after)
/// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:MathRoundDouble
/// CHECK-DAG: Return [<<Result>>]
private static long round64(double d) {
diff --git a/test/580-checker-string-fact-intrinsics/src-art/Main.java b/test/580-checker-string-fact-intrinsics/src-art/Main.java
index a2e34bf..d0750f9 100644
--- a/test/580-checker-string-fact-intrinsics/src-art/Main.java
+++ b/test/580-checker-string-fact-intrinsics/src-art/Main.java
@@ -17,9 +17,6 @@
public class Main {
/// CHECK-START: void Main.testNewStringFromBytes() builder (after)
- /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:None
-
- /// CHECK-START: void Main.testNewStringFromBytes() intrinsics_recognition (after)
/// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:StringNewStringFromBytes
public static void testNewStringFromBytes() {
@@ -51,9 +48,6 @@
/// CHECK-START: void Main.testNewStringFromChars() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
- /// CHECK-START: void Main.testNewStringFromChars() intrinsics_recognition (after)
- /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
-
/// CHECK-START: void Main.testNewStringFromChars() inliner (after)
/// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None
@@ -64,9 +58,6 @@
}
/// CHECK-START: void Main.testNewStringFromString() builder (after)
- /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:None
-
- /// CHECK-START: void Main.testNewStringFromString() intrinsics_recognition (after)
/// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:StringNewStringFromString
public static void testNewStringFromString() {
diff --git a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
index f74e88f..bd90fe7 100644
--- a/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
+++ b/test/593-checker-boolean-2-integral-conv/smali/SmaliTests.smali
@@ -210,14 +210,12 @@
.end method
## CHECK-START: int SmaliTests.longToIntOfBoolean() builder (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
## CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
-## CHECK-DAG: <<ZToJ:j\d+>> InvokeStaticOrDirect [<<Sget>>,<<Method>>]
+## CHECK-DAG: <<ZToJ:j\d+>> InvokeStaticOrDirect [<<Sget>>{{(,[ij]\d+)?}}]
## CHECK-DAG: <<JToI:i\d+>> TypeConversion [<<ZToJ>>]
## CHECK-DAG: Return [<<JToI>>]
## CHECK-START: int SmaliTests.longToIntOfBoolean() inliner (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
@@ -228,7 +226,6 @@
## CHECK-DAG: Return [<<JToI>>]
## CHECK-START: int SmaliTests.longToIntOfBoolean() select_generator (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
## CHECK-DAG: <<One:i\d+>> IntConstant 1
## CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
@@ -236,7 +233,6 @@
## CHECK-DAG: Return [<<Sel>>]
## CHECK-START: int SmaliTests.longToIntOfBoolean() instruction_simplifier$after_bce (after)
-## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
## CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
## CHECK-DAG: Return [<<Sget>>]
.method public static longToIntOfBoolean()I
diff --git a/test/593-checker-boolean-2-integral-conv/src/Main.java b/test/593-checker-boolean-2-integral-conv/src/Main.java
index fdc0919..b085c42 100644
--- a/test/593-checker-boolean-2-integral-conv/src/Main.java
+++ b/test/593-checker-boolean-2-integral-conv/src/Main.java
@@ -100,14 +100,12 @@
}
/// CHECK-START: int Main.longToIntOfBoolean() builder (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
/// CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
- /// CHECK-DAG: <<ZToJ:j\d+>> InvokeStaticOrDirect [<<Sget>>,<<Method>>]
+ /// CHECK-DAG: <<ZToJ:j\d+>> InvokeStaticOrDirect [<<Sget>>{{(,[ij]\d+)?}}]
/// CHECK-DAG: <<JToI:i\d+>> TypeConversion [<<ZToJ>>]
/// CHECK-DAG: Return [<<JToI>>]
/// CHECK-START: int Main.longToIntOfBoolean() inliner (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
/// CHECK-DAG: <<Zero:j\d+>> LongConstant 0
/// CHECK-DAG: <<One:j\d+>> LongConstant 1
/// CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
@@ -123,7 +121,6 @@
/// CHECK-NOT: Phi
/// CHECK-START: int Main.longToIntOfBoolean() select_generator (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
/// CHECK-DAG: <<Zero:j\d+>> LongConstant 0
/// CHECK-DAG: <<One:j\d+>> LongConstant 1
/// CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
@@ -135,7 +132,6 @@
// TODO: Re-enable checks below after simplifier is updated to handle this pattern: b/63064517
// CHECK-START: int Main.longToIntOfBoolean() instruction_simplifier$after_bce (after)
- // CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
// CHECK-DAG: <<Sget:z\d+>> StaticFieldGet
// CHECK-DAG: Return [<<Sget>>]
diff --git a/test/602-deoptimizeable/info.txt b/test/602-deoptimizeable/info.txt
index d0952f9..4b6147f 100644
--- a/test/602-deoptimizeable/info.txt
+++ b/test/602-deoptimizeable/info.txt
@@ -1 +1,8 @@
Test various cases for full/partial-fragment deoptimization.
+
+TODO: we should remove this test, as its expectation at the time of
+writing was that debuggable apps could run un-deoptimizeable frames
+from the boot image. Today, we deoptimize the boot image as soon as
+we see that the app is debuggable. Test 685-deoptimizeable is the proper
+version of this test, but we currently keep the 602 version around to
+help diagnose a gcstress issue.
diff --git a/test/602-deoptimizeable/src/Main.java b/test/602-deoptimizeable/src/Main.java
index d995923..7a3285d 100644
--- a/test/602-deoptimizeable/src/Main.java
+++ b/test/602-deoptimizeable/src/Main.java
@@ -62,10 +62,10 @@
public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
- // Only test stack frames in compiled mode.
- if (!hasOatFile() || isInterpreted()) {
- disableStackFrameAsserts();
- }
+ // TODO: Stack frame assertions are irrelevant in this test, as we now
+ // always run the JIT in debuggable mode. 685-deoptimizeable is the proper version
+ // of this test, but we keep this version around to diagnose a gcstress issue.
+ disableStackFrameAsserts();
final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
// Single-frame deoptimization that covers partial fragment.
@@ -126,6 +126,9 @@
assertIsManaged();
map.put(new DummyObject(10), Long.valueOf(100));
assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
+ if (map.get(new DummyObject(10)) == null) {
+ System.out.println("Expected map to contain DummyObject(10)");
+ }
} catch (Exception e) {
e.printStackTrace(System.out);
}
diff --git a/test/616-cha-unloading/cha_unload.cc b/test/616-cha-unloading/cha_unload.cc
index b5166ce..f9d3874 100644
--- a/test/616-cha-unloading/cha_unload.cc
+++ b/test/616-cha-unloading/cha_unload.cc
@@ -19,6 +19,7 @@
#include <iostream>
#include "art_method.h"
+#include "base/casts.h"
#include "class_linker.h"
#include "jit/jit.h"
#include "linear_alloc.h"
@@ -51,13 +52,13 @@
jobject java_method) {
ScopedObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, java_method);
- return static_cast<jlong>(reinterpret_cast<uintptr_t>(method));
+ return reinterpret_cast64<jlong>(method);
}
extern "C" JNIEXPORT void JNICALL Java_Main_reuseArenaOfMethod(JNIEnv*,
jclass,
jlong art_method) {
- void* ptr = reinterpret_cast<void*>(static_cast<uintptr_t>(art_method));
+ void* ptr = reinterpret_cast64<void*>(art_method);
ReaderMutexLock mu(Thread::Current(), *Locks::mutator_lock_);
ReaderMutexLock mu2(Thread::Current(), *Locks::classlinker_classes_lock_);
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index ff6e335..4097e33 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -304,6 +304,19 @@
}
}
+ /// CHECK-START-ARM: void Main.$noinline$stringToShorts(short[], java.lang.String) loop_optimization (after)
+ /// CHECK-NOT: VecLoad
+
+ /// CHECK-START-ARM64: void Main.$noinline$stringToShorts(short[], java.lang.String) loop_optimization (after)
+ /// CHECK-DAG: VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: VecStore loop:<<Loop>> outer_loop:none
+ private static void $noinline$stringToShorts(short[] dest, String src) {
+ int min = Math.min(dest.length, src.length());
+ for (int i = 0; i < min; ++i) {
+ dest[i] = (short) src.charAt(i);
+ }
+ }
+
// A strange function that does not inline.
private static void $noinline$foo(boolean x, int n) {
if (n < 0)
@@ -684,6 +697,12 @@
expectEquals(aa[i], cc.charAt(i));
}
+ short[] s2s = new short[12];
+ $noinline$stringToShorts(s2s, "abcdefghijkl");
+ for (int i = 0; i < s2s.length; ++i) {
+ expectEquals((short) "abcdefghijkl".charAt(i), s2s[i]);
+ }
+
envUsesInCond();
short[] dd = new short[23];
diff --git a/test/624-checker-stringops/smali/Smali.smali b/test/624-checker-stringops/smali/Smali.smali
index 7b063c0..8600a0a 100644
--- a/test/624-checker-stringops/smali/Smali.smali
+++ b/test/624-checker-stringops/smali/Smali.smali
@@ -16,23 +16,13 @@
.class public LSmali;
.super Ljava/lang/Object;
-## CHECK-START: int Smali.bufferLen2() instruction_simplifier (before)
+## CHECK-START: int Smali.bufferLen2() builder (after)
## CHECK-DAG: <<New:l\d+>> NewInstance
## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<Append1>>]
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null1>>,<<String2>>] intrinsic:StringBufferAppend
-## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append2>>]
-## CHECK-DAG: InvokeVirtual [<<Null2>>] intrinsic:StringBufferLength
-
-## CHECK-START: int Smali.bufferLen2() instruction_simplifier (after)
-## CHECK-DAG: <<New:l\d+>> NewInstance
-## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBufferAppend
-## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBufferAppend
-## CHECK-DAG: InvokeVirtual [<<New>>] intrinsic:StringBufferLength
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBufferAppend
+## CHECK-DAG: InvokeVirtual [<<Append2>>] intrinsic:StringBufferLength
.method public static bufferLen2()I
.registers 3
@@ -57,12 +47,10 @@
## CHECK-START: int Smali.builderLen2() instruction_simplifier (before)
## CHECK-DAG: <<New:l\d+>> NewInstance
## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>]
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend
-## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>]
-## CHECK-DAG: InvokeVirtual [<<Null3>>] intrinsic:StringBuilderLength
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: InvokeVirtual [<<Append2>>] intrinsic:StringBuilderLength
## CHECK-START: int Smali.builderLen2() instruction_simplifier (after)
## CHECK-DAG: <<New:l\d+>> NewInstance
@@ -93,18 +81,16 @@
.end method
## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (before)
-## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
-## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
-## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
-## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
-## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBufferLength loop:none
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBufferAppend loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
+## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBufferLength loop:none
## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (after)
## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
@@ -152,18 +138,16 @@
.end method
## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (before)
-## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
-## CHECK-DAG: <<Null2:l\d+>> NullCheck [<<Append1>>] loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Null2>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
-## CHECK-DAG: <<Null3:l\d+>> NullCheck [<<Append2>>] loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Null3>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
-## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBuilderLength loop:none
+## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>> NullCheck [<<New>>] loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>> NullCheck [<<New>>] loop:none
+## CHECK-DAG: InvokeVirtual [<<Null4>>] intrinsic:StringBuilderLength loop:none
## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (after)
## CHECK-DAG: <<New:l\d+>> NewInstance loop:none
diff --git a/test/631-checker-fp-abs/src/Main.java b/test/631-checker-fp-abs/src/Main.java
index 2db93b8..2d04e36 100644
--- a/test/631-checker-fp-abs/src/Main.java
+++ b/test/631-checker-fp-abs/src/Main.java
@@ -31,7 +31,7 @@
public static boolean doThrow = false;
- /// CHECK-START: float Main.$opt$noinline$absSP(float) intrinsics_recognition (after)
+ /// CHECK-START: float Main.$opt$noinline$absSP(float) builder (after)
/// CHECK-DAG: <<Result:f\d+>> InvokeStaticOrDirect intrinsic:MathAbsFloat
/// CHECK-DAG: Return [<<Result>>]
private static float $opt$noinline$absSP(float f) {
@@ -41,7 +41,7 @@
return Math.abs(f);
}
- /// CHECK-START: double Main.$opt$noinline$absDP(double) intrinsics_recognition (after)
+ /// CHECK-START: double Main.$opt$noinline$absDP(double) builder (after)
/// CHECK-DAG: <<Result:d\d+>> InvokeStaticOrDirect intrinsic:MathAbsDouble
/// CHECK-DAG: Return [<<Result>>]
private static double $opt$noinline$absDP(double d) {
diff --git a/test/638-checker-inline-cache-intrinsic/src/Main.java b/test/638-checker-inline-cache-intrinsic/src/Main.java
index 472cbf6..4a9aba5 100644
--- a/test/638-checker-inline-cache-intrinsic/src/Main.java
+++ b/test/638-checker-inline-cache-intrinsic/src/Main.java
@@ -52,11 +52,11 @@
/// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) inliner (after)
/// CHECK: Deoptimize
- /// CHECK: InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+ /// CHECK: InvokeVirtual method_name:java.lang.String.equals intrinsic:StringEquals
/// CHECK-START: boolean Main.$noinline$stringEquals(java.lang.Object) instruction_simplifier$after_inlining (after)
/// CHECK: Deoptimize
- /// CHECK: InvokeVirtual method_name:java.lang.Object.equals intrinsic:StringEquals
+ /// CHECK: InvokeVirtual method_name:java.lang.String.equals intrinsic:StringEquals
public static boolean $noinline$stringEquals(Object obj) {
return obj.equals("def");
diff --git a/test/669-moveable-string-class-equals/info.txt b/test/669-moveable-string-class-equals/info.txt
deleted file mode 100644
index 1d3202ef..0000000
--- a/test/669-moveable-string-class-equals/info.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Regression test for String.equals() intrinsic instanceof check
-when the String.class is moveable.
diff --git a/test/669-moveable-string-class-equals/run b/test/669-moveable-string-class-equals/run
deleted file mode 100755
index 7c74d8c..0000000
--- a/test/669-moveable-string-class-equals/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run without image, so that String.class is moveable.
-# Reduce heap size to force more frequent GCs.
-${RUN} --no-image --runtime-option -Xmx16m "$@"
diff --git a/test/669-moveable-string-class-equals/src/Main.java b/test/669-moveable-string-class-equals/src/Main.java
deleted file mode 100644
index d182d51..0000000
--- a/test/669-moveable-string-class-equals/src/Main.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
- if (!hasJit()) {
- // Make the test pass if not using JIT.
- return;
- }
- if (hasImage()) {
- throw new Error("The `run` script should prevent this test from running with an image!");
- }
- if (!isClassMoveable(String.class)) {
- throw new Error("String.class not moveable despite running without image!");
- }
-
- // Make sure the Main.test() is JIT-compiled and then call it.
- ensureJitCompiled(Main.class, "test");
- test();
- }
-
- public static void test() {
- int length = 5;
-
- // Hide the type of these strings in an Object array,
- // so that we treat them as Object for the String.equals() below.
- Object[] array = new Object[length];
- for (int i = 0; i != length; ++i) {
- array[i] = "V" + i;
- }
-
- // Continually check string equality between a newly allocated String and an
- // already allocated String with the same contents while allocating over 128MiB
- // memory (with heap size limited to 16MiB), ensuring we run GC and stress the
- // instanceof check in the String.equals() implementation.
- for (int count = 0; count != 128 * 1024; ++count) {
- for (int i = 0; i != length; ++i) {
- allocateAtLeast1KiB();
- assertTrue(("V" + i).equals(array[i]));
- }
- }
- }
-
- public static void allocateAtLeast1KiB() {
- // Give GC more work by allocating Object arrays.
- memory[allocationIndex] = new Object[1024 / 4];
- ++allocationIndex;
- if (allocationIndex == memory.length) {
- allocationIndex = 0;
- }
- }
-
- public static void assertTrue(boolean value) {
- if (!value) {
- throw new Error("Assertion failed!");
- }
- }
-
- private native static boolean hasJit();
- private native static boolean hasImage();
- private native static boolean isClassMoveable(Class<?> cls);
- private static native void ensureJitCompiled(Class<?> itf, String method_name);
-
- // We shall retain some allocated memory and release old allocations
- // so that the GC has something to do.
- public static Object[] memory = new Object[4096];
- public static int allocationIndex = 0;
-}
diff --git a/test/684-checker-simd-dotprod/expected.txt b/test/684-checker-simd-dotprod/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/684-checker-simd-dotprod/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/684-checker-simd-dotprod/info.txt b/test/684-checker-simd-dotprod/info.txt
new file mode 100644
index 0000000..6c1efb6
--- /dev/null
+++ b/test/684-checker-simd-dotprod/info.txt
@@ -0,0 +1 @@
+Functional tests on dot product idiom SIMD vectorization.
diff --git a/test/684-checker-simd-dotprod/src/Main.java b/test/684-checker-simd-dotprod/src/Main.java
new file mode 100644
index 0000000..e0c8716
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/Main.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import other.TestByte;
+import other.TestCharShort;
+import other.TestVarious;
+
+/**
+ * Tests for dot product idiom vectorization.
+ */
+public class Main {
+ public static void main(String[] args) {
+ TestByte.run();
+ TestCharShort.run();
+ TestVarious.run();
+ System.out.println("passed");
+ }
+}
diff --git a/test/684-checker-simd-dotprod/src/other/TestByte.java b/test/684-checker-simd-dotprod/src/other/TestByte.java
new file mode 100644
index 0000000..9acfc59
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestByte.java
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization: byte case.
+ */
+public class TestByte {
+
+ public static final int ARRAY_SIZE = 1024;
+
+ /// CHECK-START: int other.TestByte.testDotProdSimple(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSimple(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdSimple(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdComplex(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC1:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:b\d+>> TypeConversion [<<AddC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC2:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:b\d+>> TypeConversion [<<AddC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplex(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplex(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((byte)(a[i] + 1)) * ((byte)(b[i] + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsigned(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSimpleUnsigned(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdSimpleUnsigned(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (a[i] & 0xff) * (b[i] & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdComplexUnsigned(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:a\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:a\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexUnsigned(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexUnsigned(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (((a[i] & 0xff) + 1) & 0xff) * (((b[i] & 0xff) + 1) & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdComplexUnsignedCastedToSigned(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:b\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:b\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexUnsignedCastedToSigned(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexUnsignedCastedToSigned(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((byte)((a[i] & 0xff) + 1)) * ((byte)((b[i] & 0xff) + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdComplexSignedCastedToUnsigned(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:a\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:a\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdComplexSignedCastedToUnsigned(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexSignedCastedToUnsigned(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((a[i] + 1) & 0xff) * ((b[i] + 1) & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdSignedWidening(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Int8
+ public static final int testDotProdSignedWidening(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((short)(a[i])) * ((short)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdParamSigned(int, byte[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Int8
+ public static final int testDotProdParamSigned(int x, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (byte)(x) * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestByte.testDotProdParamUnsigned(int, byte[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Uint8
+ public static final int testDotProdParamUnsigned(int x, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (x & 0xff) * (b[i] & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ // No DOTPROD cases.
+
+ /// CHECK-START: int other.TestByte.testDotProdIntParam(int, byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdIntParam(int x, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = b[i] * (x);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSignedToChar(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSignedToChar(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((char)(a[i])) * ((char)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ // Cases when result of Mul is type-converted are not supported.
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToSignedByte(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToSignedByte(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ byte temp = (byte)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToUnsignedByte(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToUnsignedByte(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ s += (a[i] * b[i]) & 0xff;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToSignedByte(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToSignedByte(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ byte temp = (byte)((a[i] & 0xff) * (b[i] & 0xff));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToUnsignedByte(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToUnsignedByte(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ s += ((a[i] & 0xff) * (b[i] & 0xff)) & 0xff;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToShort(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToShort(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleCastedToChar(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToChar(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToShort(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToShort(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)((a[i] & 0xff) * (b[i] & 0xff));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToChar(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToChar(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)((a[i] & 0xff) * (b[i] & 0xff));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdSimpleUnsignedCastedToLong(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToLong(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ long temp = (long)((a[i] & 0xff) * (b[i] & 0xff));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestByte.testDotProdUnsignedSigned(byte[], byte[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdUnsignedSigned(byte[] a, byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (a[i] & 0xff) * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void testDotProd(byte[] b1, byte[] b2, int[] results) {
+ expectEquals(results[0], testDotProdSimple(b1, b2));
+ expectEquals(results[1], testDotProdComplex(b1, b2));
+ expectEquals(results[2], testDotProdSimpleUnsigned(b1, b2));
+ expectEquals(results[3], testDotProdComplexUnsigned(b1, b2));
+ expectEquals(results[4], testDotProdComplexUnsignedCastedToSigned(b1, b2));
+ expectEquals(results[5], testDotProdComplexSignedCastedToUnsigned(b1, b2));
+ expectEquals(results[6], testDotProdSignedWidening(b1, b2));
+ expectEquals(results[7], testDotProdParamSigned(-128, b2));
+ expectEquals(results[8], testDotProdParamUnsigned(-128, b2));
+ expectEquals(results[9], testDotProdIntParam(-128, b2));
+ expectEquals(results[10], testDotProdSignedToChar(b1, b2));
+ expectEquals(results[11], testDotProdSimpleCastedToSignedByte(b1, b2));
+ expectEquals(results[12], testDotProdSimpleCastedToUnsignedByte(b1, b2));
+ expectEquals(results[13], testDotProdSimpleUnsignedCastedToSignedByte(b1, b2));
+ expectEquals(results[14], testDotProdSimpleUnsignedCastedToUnsignedByte(b1, b2));
+ expectEquals(results[15], testDotProdSimpleCastedToShort(b1, b2));
+ expectEquals(results[16], testDotProdSimpleCastedToChar(b1, b2));
+ expectEquals(results[17], testDotProdSimpleUnsignedCastedToShort(b1, b2));
+ expectEquals(results[18], testDotProdSimpleUnsignedCastedToChar(b1, b2));
+ expectEquals(results[19], testDotProdSimpleUnsignedCastedToLong(b1, b2));
+ expectEquals(results[20], testDotProdUnsignedSigned(b1, b2));
+ }
+
+ public static void run() {
+ byte[] b1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+ byte[] b2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+ int[] results_1 = { 64516, 65548, 64516, 65548, 65548, 65548, 64516, -65024, 65024, -65024,
+ 64516, 4, 4, 4, 4, 64516, 64516, 64516, 64516, 64516, 64516 };
+ testDotProd(b1_1, b2_1, results_1);
+
+ byte[] b1_2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+ byte[] b2_2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+ int[] results_2 = { 80645, 81931, 80645, 81931, 81931, 81931, 80645, -81280, 81280, -81280,
+ 80645, 5, 5, 5, 5, 80645, 80645, 80645, 80645, 80645, 80645 };
+ testDotProd(b1_2, b2_2, results_2);
+
+ byte[] b1_3 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+ byte[] b2_3 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+ int[] results_3 = { -81280, 81291, 81280, 82571, 81291, 82571, -81280, -81280, 81280, -81280,
+ 41534080, -640, 640, -640, 640, -81280, 246400, 81280, 81280, 81280, 81280 };
+ testDotProd(b1_3, b2_3, results_3);
+
+ byte[] b1_4 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+ byte[] b2_4 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+ int[] results_4 = { 81920, 80656, 81920, 83216, 80656, 83216, 81920, 81920, 81920, 81920,
+ -83804160, 0, 0, 0, 0, 81920, 81920, 81920, 81920, 81920, -81920 };
+ testDotProd(b1_4, b2_4, results_4);
+ }
+
+ public static void main(String[] args) {
+ run();
+ }
+}
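
[Editorial note, not part of the change: the expected-value arrays in run() above are ordinary scalar dot products over 16-element inputs; for example, results_1[0] = 64516 is four 127*127 products (each test returns s - 1 with s starting at 1, so the offset cancels). The following is a minimal, hypothetical reference sketch — assumed class and method names, no vectorization expectations — that recomputes the simple signed and unsigned byte cases the same way the scalar loops above do, which can help when checking or extending the results_* tables.]

    // Hypothetical reference helper; mirrors testDotProdSimple and
    // testDotProdSimpleUnsigned above, but as plain scalar Java.
    public class DotProdReference {
      static int signedDot(byte[] a, byte[] b) {
        int s = 0;
        for (int i = 0; i < b.length; i++) {
          s += a[i] * b[i];                    // bytes sign-extended to int (Int8 case)
        }
        return s;
      }

      static int unsignedDot(byte[] a, byte[] b) {
        int s = 0;
        for (int i = 0; i < b.length; i++) {
          s += (a[i] & 0xff) * (b[i] & 0xff);  // zero-extended operands (Uint8 case)
        }
        return s;
      }

      public static void main(String[] args) {
        byte[] v = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
        System.out.println(signedDot(v, v));   // 4 * 127 * 127 = 64516, matching results_1[0]
        System.out.println(unsignedDot(v, v)); // also 64516 here, matching results_1[2]
      }
    }
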
diff --git a/test/684-checker-simd-dotprod/src/other/TestCharShort.java b/test/684-checker-simd-dotprod/src/other/TestCharShort.java
new file mode 100644
index 0000000..9cb9db5
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestCharShort.java
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization: char and short case.
+ */
+public class TestCharShort {
+
+ public static final int ARRAY_SIZE = 1024;
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimple(short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSimple(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdSimple(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdComplex(short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC1:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:s\d+>> TypeConversion [<<AddC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC2:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:s\d+>> TypeConversion [<<AddC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplex(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplex(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((short)(a[i] + 1)) * ((short)(b[i] + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsigned(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSimpleUnsigned(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdSimpleUnsigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdComplexUnsigned(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:c\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:c\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexUnsigned(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexUnsigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((char)(a[i] + 1)) * ((char)(b[i] + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdComplexUnsignedCastedToSigned(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:s\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:s\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexUnsignedCastedToSigned(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Int16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexUnsignedCastedToSigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((short)(a[i] + 1)) * ((short)(b[i] + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdComplexSignedCastedToUnsigned(short[], short[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddC:i\d+>> Add [<<Get1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:c\d+>> TypeConversion [<<AddC>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<AddGets:i\d+>> Add [<<Get2>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:c\d+>> TypeConversion [<<AddGets>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdComplexSignedCastedToUnsigned(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd1:d\d+>> VecAdd [<<Load1>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<VAdd2:d\d+>> VecAdd [<<Load2>>,<<Repl>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<VAdd1>>,<<VAdd2>>] type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdComplexSignedCastedToUnsigned(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((char)(a[i] + 1)) * ((char)(b[i] + 1));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSignedToInt(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Int16
+ public static final int testDotProdSignedToInt(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((int)(a[i])) * ((int)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdParamSigned(int, short[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Int16
+ public static final int testDotProdParamSigned(int x, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (short)(x) * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdParamUnsigned(int, char[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Uint16
+ public static final int testDotProdParamUnsigned(int x, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (char)(x) * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdIntParam(int, short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdIntParam(int x, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = b[i] * (x);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START-{ARM64}: int other.TestCharShort.testDotProdSignedToChar(short[], short[]) loop_optimization (after)
+ /// CHECK-DAG: VecDotProd type:Uint16
+ public static final int testDotProdSignedToChar(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((char)(a[i])) * ((char)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ // Cases when result of Mul is type-converted are not supported.
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleMulCastedToSigned(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd type:Uint16
+ public static final int testDotProdSimpleMulCastedToSigned(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleMulCastedToUnsigned(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleMulCastedToUnsigned(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedMulCastedToSigned(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedMulCastedToSigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedMulCastedToUnsigned(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedMulCastedToUnsigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleCastedToShort(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToShort(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleCastedToChar(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleCastedToChar(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToShort(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToShort(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ short temp = (short)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToChar(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToChar(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ char temp = (char)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSimpleUnsignedCastedToLong(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSimpleUnsignedCastedToLong(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ long temp = (long)(a[i] * b[i]);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ // Narrowing conversions.
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSignedNarrowerSigned(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSignedNarrowerSigned(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((byte)(a[i])) * ((byte)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdSignedNarrowerUnsigned(short[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdSignedNarrowerUnsigned(short[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (a[i] & 0xff) * (b[i] & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdUnsignedNarrowerSigned(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdUnsignedNarrowerSigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((byte)(a[i])) * ((byte)(b[i]));
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdUnsignedNarrowerUnsigned(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdUnsignedNarrowerUnsigned(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = (a[i] & 0xff) * (b[i] & 0xff);
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ /// CHECK-START: int other.TestCharShort.testDotProdUnsignedSigned(char[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdUnsignedSigned(char[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s - 1;
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void testDotProd(short[] s1, short[] s2, char[] c1, char[] c2, int[] results) {
+ expectEquals(results[0], testDotProdSimple(s1, s2));
+ expectEquals(results[1], testDotProdComplex(s1, s2));
+ expectEquals(results[2], testDotProdSimpleUnsigned(c1, c2));
+ expectEquals(results[3], testDotProdComplexUnsigned(c1, c2));
+ expectEquals(results[4], testDotProdComplexUnsignedCastedToSigned(c1, c2));
+ expectEquals(results[5], testDotProdComplexSignedCastedToUnsigned(s1, s2));
+ expectEquals(results[6], testDotProdSignedToInt(s1, s2));
+ expectEquals(results[7], testDotProdParamSigned(-32768, s2));
+ expectEquals(results[8], testDotProdParamUnsigned(-32768, c2));
+ expectEquals(results[9], testDotProdIntParam(-32768, s2));
+ expectEquals(results[10], testDotProdSignedToChar(s1, s2));
+ expectEquals(results[11], testDotProdSimpleMulCastedToSigned(s1, s2));
+ expectEquals(results[12], testDotProdSimpleMulCastedToUnsigned(s1, s2));
+ expectEquals(results[13], testDotProdSimpleUnsignedMulCastedToSigned(c1, c2));
+ expectEquals(results[14], testDotProdSimpleUnsignedMulCastedToUnsigned(c1, c2));
+ expectEquals(results[15], testDotProdSimpleCastedToShort(s1, s2));
+ expectEquals(results[16], testDotProdSimpleCastedToChar(s1, s2));
+ expectEquals(results[17], testDotProdSimpleUnsignedCastedToShort(c1, c2));
+ expectEquals(results[18], testDotProdSimpleUnsignedCastedToChar(c1, c2));
+ expectEquals(results[19], testDotProdSimpleUnsignedCastedToLong(c1, c2));
+ expectEquals(results[20], testDotProdSignedNarrowerSigned(s1, s2));
+ expectEquals(results[21], testDotProdSignedNarrowerUnsigned(s1, s2));
+ expectEquals(results[22], testDotProdUnsignedNarrowerSigned(c1, c2));
+ expectEquals(results[23], testDotProdUnsignedNarrowerUnsigned(c1, c2));
+ expectEquals(results[24], testDotProdUnsignedSigned(c1, s2));
+ }
+
+ public static void run() {
+ final short MAX_S = Short.MAX_VALUE;
+ final short MIN_S = Short.MAX_VALUE;
+
+ short[] s1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ short[] s2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ char[] c1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ char[] c2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ int[] results_1 = { 2147352578, -2147483634, 2147352578, -2147483634, -2147483634, -2147483634,
+ 2147352578, -2147418112, 2147418112, -2147418112, 2147352578,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2147352578, 2, 130050, 2, 130050, 2147352578 };
+ testDotProd(s1_1, s2_1, c1_1, c2_1, results_1);
+
+ short[] s1_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+ short[] s2_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+ char[] c1_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+ char[] c2_2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S, MAX_S, MAX_S };
+ int[] results_2 = { -262140, 12, -262140, 12, 12, 12, -262140, 131072, -131072, 131072,
+ -262140, 4, 4, 4, 4, 4, 4, 4, 4, -262140, 4, 260100, 4, 260100, -262140 };
+ testDotProd(s1_2, s2_2, c1_2, c2_2, results_2);
+
+ short[] s1_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ short[] s2_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ char[] c1_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ char[] c2_3 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MAX_S, MAX_S };
+ int[] results_3 = { 2147352578, -2147483634, 2147352578, -2147483634, -2147483634,
+ -2147483634, 2147352578, -2147418112, 2147418112, -2147418112,
+ 2147352578, 2, 2, 2, 2, 2, 2, 2, 2, 2147352578, 2, 130050, 2,
+ 130050, 2147352578};
+ testDotProd(s1_3, s2_3, c1_3, c2_3, results_3);
+
+
+ short[] s1_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ short[] s2_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ char[] c1_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ char[] c2_4 = { MIN_S, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ int[] results_4 = { -1073938429, -1073741811, -1073938429, -1073741811, -1073741811,
+ -1073741811, -1073938429, 1073840128, -1073840128, 1073840128,
+ -1073938429, 3, 3, 3, 3, 3, 3, 3, 3, -1073938429, 3, 195075, 3,
+ 195075, -1073938429 };
+ testDotProd(s1_4, s2_4, c1_4, c2_4, results_4);
+ }
+
+ public static void main(String[] args) {
+ run();
+ }
+}
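
[Editorial note, not part of the change: the large negative constants in the results_* arrays above come from ordinary int overflow of the scalar accumulation, and the expected values are consistent with MIN_S being bound to Short.MAX_VALUE exactly as declared in run() (which is why results_3 mirrors results_1). A minimal, hypothetical sketch of how one such constant wraps around:]

    // Hypothetical arithmetic check: four accumulated 32767*32767 products overflow int.
    public class OverflowCheck {
      public static void main(String[] args) {
        long exact = 4L * 32767 * 32767;   // 4294705156, does not fit in 32 bits
        int wrapped = (int) exact;         // 4294705156 - 2^32 = -262140
        System.out.println(wrapped);       // prints -262140, matching results_2[0]
      }
    }
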
diff --git a/test/684-checker-simd-dotprod/src/other/TestVarious.java b/test/684-checker-simd-dotprod/src/other/TestVarious.java
new file mode 100644
index 0000000..3f46098
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestVarious.java
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization.
+ */
+public class TestVarious {
+
+ /// CHECK-START: int other.TestVarious.testDotProdConstRight(byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Const89>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdConstRight(byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const89>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdConstRight(byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = b[i] * 89;
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdConstLeft(byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Const89>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdConstLeft(byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Const89:i\d+>> IntConstant 89 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const89>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdConstLeft(byte[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = 89 * (b[i] & 0xff);
+ s += temp;
+ }
+ return s;
+ }
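+ // Unlike testDotProdConstRight above, the & 0xff mask makes the byte operand unsigned,
+ // which is why the CHECK lines expect VecDotProd type:Uint8 here rather than type:Int8.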
+
+ /// CHECK-START: int other.TestVarious.testDotProdLoopInvariantConvRight(byte[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Param:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<ConstL:i\d+>> IntConstant 129 loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Param>>,<<ConstL>>] loop:none
+ /// CHECK-DAG: <<TypeCnv:b\d+>> TypeConversion [<<AddP>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<TypeCnv>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdLoopInvariantConvRight(byte[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Param:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<ConstL:i\d+>> IntConstant 129 loop:none
+ /// CHECK-DAG: <<AddP:i\d+>> Add [<<Param>>,<<ConstL>>] loop:none
+ /// CHECK-DAG: <<TypeCnv:b\d+>> TypeConversion [<<AddP>>] loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<TypeCnv>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdLoopInvariantConvRight(byte[] b, int param) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = b[i] * ((byte)(param + 129));
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdByteToChar(char[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdByteToChar(char[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = ((char)((byte)(a[i] + 129))) * b[i];
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdMixedSize(byte[], short[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdMixedSize(byte[] a, short[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdMixedSizeAndSign(byte[], char[]) loop_optimization (after)
+ /// CHECK-NOT: VecDotProd
+ public static final int testDotProdMixedSizeAndSign(byte[] a, char[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdInt32(int[], int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdInt32(int[], int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Set:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul:d\d+>> VecMul [<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecAdd [<<Phi2>>,<<Mul>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-DAG: <<Reduce:d\d+>> VecReduce [<<Phi2>>] loop:none
+ /// CHECK-DAG: VecExtractScalar [<<Reduce>>] loop:none
+ public static final int testDotProdInt32(int[] a, int[] b) {
+ int s = 1;
+ for (int i = 0; i < b.length; i++) {
+ int temp = a[i] * b[i];
+ s += temp;
+ }
+ return s;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsigned1(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Const2>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul1:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:a\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC2:a\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul2:i\d+>> Mul [<<TypeC1>>,<<TypeC2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi3>>,<<Mul2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsigned1(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Const2>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi3>>,<<Load1>>,<<Load2>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ public static final int testDotProdBothSignedUnsigned1(byte[] a, byte[] b) {
+ int s1 = 1;
+ int s2 = 2;
+ for (int i = 0; i < b.length; i++) {
+ byte a_val = a[i];
+ byte b_val = b[i];
+ s1 += a_val * b_val;
+ s2 += (a_val & 0xff) * (b_val & 0xff);
+ }
+ return s1 + s2;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsigned2(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Const2>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeC1:a\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul1:i\d+>> Mul [<<Get2>>,<<TypeC1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi3>>,<<Mul1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul2:i\d+>> Mul [<<Get1>>,<<Const42>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsigned2(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 loop:none
+ /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Const42>>] loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Const2>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi3>>,<<Load2>>,<<Load1>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Repl>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ public static final int testDotProdBothSignedUnsigned2(byte[] a, byte[] b) {
+ int s1 = 1;
+ int s2 = 2;
+ for (int i = 0; i < b.length; i++) {
+ byte a_val = a[i];
+ byte b_val = b[i];
+ s2 += (a_val & 0xff) * (b_val & 0xff);
+ s1 += a_val * 42;
+ }
+ return s1 + s2;
+ }
+
+ /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsignedDoubleLoad(byte[], byte[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Const2>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<GetB1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<GetB2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul1:i\d+>> Mul [<<GetB1>>,<<GetB2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<GetA1:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<GetA2:a\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul2:i\d+>> Mul [<<GetA1>>,<<GetA2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi3>>,<<Mul2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsignedDoubleLoad(byte[], byte[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Const16:i\d+>> IntConstant 16 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Const2>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Int8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load3:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load4:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi3>>,<<Load3>>,<<Load4>>] type:Uint8 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const16>>] loop:<<Loop>> outer_loop:none
+ public static final int testDotProdBothSignedUnsignedDoubleLoad(byte[] a, byte[] b) {
+ int s1 = 1;
+ int s2 = 2;
+ for (int i = 0; i < b.length; i++) {
+ s1 += a[i] * b[i];
+ s2 += (a[i] & 0xff) * (b[i] & 0xff);
+ }
+ return s1 + s2;
+ }
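+ // Because a[i] and b[i] are re-read for the masked expressions, the vectorized loop above
+ // uses two pairs of VecLoads (one per VecDotProd), unlike testDotProdBothSignedUnsigned1,
+ // which feeds both dot products from a single pair of loads.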
+
+ /// CHECK-START: int other.TestVarious.testDotProdBothSignedUnsignedChar(char[], char[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Const1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:i\d+>> Phi [<<Const2>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeS1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<TypeS2:s\d+>> TypeConversion [<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul1:i\d+>> Mul [<<TypeS1>>,<<TypeS2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi3>>,<<Mul1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Mul2:i\d+>> Mul [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi2>>,<<Mul2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const1>>] loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START-{ARM64}: int other.TestVarious.testDotProdBothSignedUnsignedChar(char[], char[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Const8:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Set1:d\d+>> VecSetScalars [<<Const1>>] loop:none
+ /// CHECK-DAG: <<Set2:d\d+>> VecSetScalars [<<Const2>>] loop:none
+ //
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:d\d+>> Phi [<<Set1>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi3:d\d+>> Phi [<<Set2>>,{{d\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load1:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Load2:d\d+>> VecLoad [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi3>>,<<Load1>>,<<Load2>>] type:Int16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecDotProd [<<Phi2>>,<<Load1>>,<<Load2>>] type:Uint16 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: Add [<<Phi1>>,<<Const8>>] loop:<<Loop>> outer_loop:none
+ public static final int testDotProdBothSignedUnsignedChar(char[] a, char[] b) {
+ int s1 = 1;
+ int s2 = 2;
+ for (int i = 0; i < b.length; i++) {
+ char a_val = a[i];
+ char b_val = b[i];
+ s2 += ((short)a_val) * ((short)b_val);
+ s1 += a_val * b_val;
+ }
+ return s1 + s2;
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void run() {
+ final short MAX_S = Short.MAX_VALUE;
+ final short MIN_S = Short.MAX_VALUE;
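+ // NOTE: MIN_S is assigned Short.MAX_VALUE (not Short.MIN_VALUE); the expectEquals values
+ // below assume these exact inputs.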
+
+ byte[] b1 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+ byte[] b2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+
+ char[] c1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+ char[] c2 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+
+ int[] i1 = { -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -128, -128, -128, -128 };
+ int[] i2 = { 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127 };
+
+ short[] s1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MIN_S, MIN_S };
+
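+ // For reference: b2 has five entries equal to 127 (the rest are 0) and s starts at 1, so
+ // testDotProdConstRight(b2) = testDotProdConstLeft(b2) = 1 + 5 * 127 * 89 = 56516, and with
+ // (byte)(129 + 129) == 2, testDotProdLoopInvariantConvRight(b2, 129) = 1 + 5 * 127 * 2 = 1271.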
+ expectEquals(56516, testDotProdConstRight(b2));
+ expectEquals(56516, testDotProdConstLeft(b2));
+ expectEquals(1271, testDotProdLoopInvariantConvRight(b2, 129));
+ expectEquals(-8519423, testDotProdByteToChar(c1, c2));
+ expectEquals(-8388351, testDotProdMixedSize(b1, s1));
+ expectEquals(-8388351, testDotProdMixedSizeAndSign(b1, c2));
+ expectEquals(-81279, testDotProdInt32(i1, i2));
+ expectEquals(3, testDotProdBothSignedUnsigned1(b1, b2));
+ expectEquals(54403, testDotProdBothSignedUnsigned2(b1, b2));
+ expectEquals(3, testDotProdBothSignedUnsignedDoubleLoad(b1, b2));
+ expectEquals(-262137, testDotProdBothSignedUnsignedChar(c1, c2));
+ }
+
+ public static void main(String[] args) {
+ run();
+ }
+}
diff --git a/test/684-select-condition/expected.txt b/test/684-select-condition/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/684-select-condition/expected.txt
diff --git a/test/684-select-condition/info.txt b/test/684-select-condition/info.txt
new file mode 100644
index 0000000..f9d4acd
--- /dev/null
+++ b/test/684-select-condition/info.txt
@@ -0,0 +1 @@
+Regression test for a bug in ARM's code generator for HSelect.
diff --git a/test/684-select-condition/src/Main.java b/test/684-select-condition/src/Main.java
new file mode 100644
index 0000000..196ff1a
--- /dev/null
+++ b/test/684-select-condition/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String args[]) {
+ doFloatingPointTest("1", "1.0");
+ doFloatingPointTest("4", "2.0");
+ checkValue(String.valueOf(doIntegerTest1(4)), "0");
+ checkValue(String.valueOf(doIntegerTest2(4)), "4");
+
+ // Another variant of the floating point test, but less brittle.
+ staticField = 1;
+ checkValue(String.valueOf($noinline$test()), "1.0");
+ staticField = 4;
+ checkValue(String.valueOf($noinline$test()), "2.0");
+ }
+
+ // This code is a reduced version of the original reproducer. The ARM
+ // code generator used to generate wrong code for it. Note that this
+ // test is very brittle: a simple change to it could keep the compiler
+ // from tripping over the bug again.
+ public static void doFloatingPointTest(String s, String expected) {
+ float a = (float)Integer.valueOf(s);
+ a = a < 2.0f ? a : 2.0f;
+ checkValue("" + a, expected);
+ }
+
+ // The compiler used to trip on the following two methods. These tests
+ // are very brittle and rely on constant folding not being run after
+ // load/store elimination.
+ public static int doIntegerTest1(int param) {
+ Main main = new Main();
+ main.field = 0;
+ return (main.field == 0) ? 0 : param;
+ }
+
+ public static int doIntegerTest2(int param) {
+ Main main = new Main();
+ main.field = 0;
+ return (main.field != 0) ? 0 : param;
+ }
+
+ public static void checkValue(String actual, String expected) {
+ if (!expected.equals(actual)) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
+ static void $noinline$nothing() {}
+ static int $noinline$getField() { return staticField; }
+
+ static float $noinline$test() {
+ // The constant 2.0f is materialized for the GreaterThanOrEqual at the beginning of the
+ // method; since the following call clobbers caller-saves, the constant is allocated to s16.
+ // r0(field) = InvokeStaticOrDirect[]
+ int one = $noinline$getField();
+ // s0(a_1) = TypeConversion[r0(one)]
+ float a = (float)one;
+ // s16(a_2) = Select[s0(a_1), C(2.0f), GreaterThanOrEqual[s0(a_1), s16(2.0f)]]
+ a = a < 2.0f ? a : 2.0f;
+ // The following call is added to clobber caller-saves, forcing the output of the Select
+ // to be allocated to s16.
+ $noinline$nothing();
+ return a;
+ }
+
+ int field;
+ static int staticField;
+}
diff --git a/test/685-deoptimizeable/expected.txt b/test/685-deoptimizeable/expected.txt
new file mode 100644
index 0000000..f993efc
--- /dev/null
+++ b/test/685-deoptimizeable/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Finishing
diff --git a/test/685-deoptimizeable/info.txt b/test/685-deoptimizeable/info.txt
new file mode 100644
index 0000000..d0952f9
--- /dev/null
+++ b/test/685-deoptimizeable/info.txt
@@ -0,0 +1 @@
+Test various cases for full/partial-fragment deoptimization.
diff --git a/test/685-deoptimizeable/src/Main.java b/test/685-deoptimizeable/src/Main.java
new file mode 100644
index 0000000..fc7fdea
--- /dev/null
+++ b/test/685-deoptimizeable/src/Main.java
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+
+class DummyObject {
+ public static boolean sHashCodeInvoked = false;
+ private int i;
+
+ public DummyObject(int i) {
+ this.i = i;
+ }
+
+ public boolean equals(Object obj) {
+ return (obj instanceof DummyObject) && (i == ((DummyObject)obj).i);
+ }
+
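+ // hashCode() is invoked from inside HashMap.put() in the partial-fragment test below, so
+ // the deoptimizeAll() here runs while compiled frames of the caller are still on the stack.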
+ public int hashCode() {
+ sHashCodeInvoked = true;
+ Main.assertIsManaged();
+ Main.deoptimizeAll();
+ Main.assertIsInterpreted();
+ return i % 64;
+ }
+}
+
+public class Main {
+ static boolean sFlag = false;
+
+ public static native void deoptimizeAll();
+ public static native void undeoptimizeAll();
+ public static native void assertIsInterpreted();
+ public static native void assertIsManaged();
+ public static native void assertCallerIsInterpreted();
+ public static native void disableStackFrameAsserts();
+ public static native boolean hasJit();
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+
+ public static void execute(Runnable runnable) throws Exception {
+ Thread t = new Thread(runnable);
+ t.start();
+ t.join();
+ }
+
+ public static void ensureAllJitCompiled() {
+ ensureJitCompiled(HashMap.class, "hash");
+ ensureJitCompiled(Main.class, "$noinline$run1");
+ ensureJitCompiled(Main.class, "$noinline$run2");
+ ensureJitCompiled(Main.class, "$noinline$run3A");
+ ensureJitCompiled(Main.class, "$noinline$run3B");
+ ensureJitCompiled(DummyObject.class, "hashCode");
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ // Only test stack frames in compiled mode.
+ if (!hasJit()) {
+ disableStackFrameAsserts();
+ }
+
+ ensureAllJitCompiled();
+
+ final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
+
+ // Single-frame deoptimization that covers partial fragment.
+ execute(new Runnable() {
+ public void run() {
+ ensureJitCompiled(this.getClass(), "runInternal");
+ runInternal();
+ }
+
+ public void runInternal() {
+ int[] arr = new int[3];
+ assertIsManaged();
+ int res = $noinline$run1(arr);
+ assertIsManaged(); // Only single frame is deoptimized.
+ if (res != 79) {
+ System.out.println("Failure 1!");
+ System.exit(0);
+ }
+ }
+ });
+
+ // Single-frame deoptimization that covers a full fragment.
+ execute(new Runnable() {
+ public void run() {
+ ensureJitCompiled(this.getClass(), "runInternal");
+ runInternal();
+ }
+
+ public void runInternal() {
+ try {
+ int[] arr = new int[3];
+ assertIsManaged();
+ // Use reflection to call $noinline$run2 so that it does
+ // full-fragment deoptimization since that is an upcall.
+ Class<?> cls = Class.forName("Main");
+ Method method = cls.getDeclaredMethod("$noinline$run2", int[].class);
+ double res = (double)method.invoke(Main.class, arr);
+ assertIsManaged(); // Only single frame is deoptimized.
+ if (res != 79.3d) {
+ System.out.println("Failure 2!");
+ System.exit(0);
+ }
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ }
+ }
+ });
+
+ // Full-fragment deoptimization.
+ execute(new Runnable() {
+ public void run() {
+ ensureJitCompiled(this.getClass(), "runInternal");
+ runInternal();
+ }
+
+ public void runInternal() {
+ assertIsManaged();
+ float res = $noinline$run3B();
+ assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
+ if (res != 0.034f) {
+ System.out.println("Failure 3!");
+ System.exit(0);
+ }
+ }
+ });
+
+ undeoptimizeAll(); // Make compiled code usable again.
+ ensureAllJitCompiled();
+
+ // Partial-fragment deoptimization.
+ execute(new Runnable() {
+ public void run() {
+ ensureJitCompiled(this.getClass(), "runInternal");
+ ensureJitCompiled(HashMap.class, "hash");
+ runInternal();
+ }
+
+ public void runInternal() {
+ try {
+ assertIsManaged();
+ map.put(new DummyObject(10), Long.valueOf(100));
+ assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ }
+ }
+ });
+
+ undeoptimizeAll(); // Make compiled code usable again.
+ ensureAllJitCompiled();
+
+ if (!DummyObject.sHashCodeInvoked) {
+ System.out.println("hashCode() method not invoked!");
+ }
+ if (map.get(new DummyObject(10)) != 100) {
+ System.out.println("Wrong hashmap value!");
+ }
+ System.out.println("Finishing");
+ }
+
+ public static int $noinline$run1(int[] arr) {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ boolean caught = false;
+ // BCE will use deoptimization for the code below.
+ try {
+ arr[0] = 1;
+ arr[1] = 1;
+ arr[2] = 1;
+ // This causes AIOOBE and triggers deoptimization from compiled code.
+ arr[3] = 1;
+ } catch (ArrayIndexOutOfBoundsException e) {
+ assertIsInterpreted(); // Single-frame deoptimization triggered.
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("Expected exception");
+ }
+ assertIsInterpreted();
+ return 79;
+ }
+
+ public static double $noinline$run2(int[] arr) {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ boolean caught = false;
+ // BCE will use deoptimization for the code below.
+ try {
+ arr[0] = 1;
+ arr[1] = 1;
+ arr[2] = 1;
+ // This causes AIOOBE and triggers deoptimization from compiled code.
+ arr[3] = 1;
+ } catch (ArrayIndexOutOfBoundsException e) {
+ assertIsInterpreted(); // Single-frame deoptimization triggered.
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("Expected exception");
+ }
+ assertIsInterpreted();
+ return 79.3d;
+ }
+
+ public static float $noinline$run3A() {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ // Deoptimize callers.
+ deoptimizeAll();
+ assertIsInterpreted();
+ assertCallerIsInterpreted(); // $noinline$run3B is deoptimizeable.
+ return 0.034f;
+ }
+
+ public static float $noinline$run3B() {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ float res = $noinline$run3A();
+ assertIsInterpreted();
+ return res;
+ }
+}
diff --git a/test/685-shifts/expected.txt b/test/685-shifts/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/685-shifts/expected.txt
diff --git a/test/685-shifts/info.txt b/test/685-shifts/info.txt
new file mode 100644
index 0000000..9cf3e6d
--- /dev/null
+++ b/test/685-shifts/info.txt
@@ -0,0 +1 @@
+Tests for the compiler when shift instructions use 0 or 1 as the constant shift amount.
diff --git a/test/685-shifts/smali/Test.smali b/test/685-shifts/smali/Test.smali
new file mode 100644
index 0000000..f8dfd6f
--- /dev/null
+++ b/test/685-shifts/smali/Test.smali
@@ -0,0 +1,58 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+.super Ljava/lang/Object;
+
+.method public static shlZero(J)J
+ .registers 6
+ const v2, 0x0
+ shl-long v0, p0, v2
+ return-wide v0
+.end method
+
+.method public static shrZero(J)J
+ .registers 6
+ const v2, 0x0
+ shr-long v0, p0, v2
+ return-wide v0
+.end method
+
+.method public static ushrZero(J)J
+ .registers 6
+ const v2, 0x0
+ ushr-long v0, p0, v2
+ return-wide v0
+.end method
+
+.method public static shlOne(J)J
+ .registers 6
+ const v2, 0x1
+ shl-long v0, p0, v2
+ return-wide v0
+.end method
+
+.method public static shrOne(J)J
+ .registers 6
+ const v2, 0x1
+ shr-long v0, p0, v2
+ return-wide v0
+.end method
+
+.method public static ushrOne(J)J
+ .registers 6
+ const v2, 0x1
+ ushr-long v0, p0, v2
+ return-wide v0
+.end method
diff --git a/test/685-shifts/src/Main.java b/test/685-shifts/src/Main.java
new file mode 100644
index 0000000..d186363
--- /dev/null
+++ b/test/685-shifts/src/Main.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ static long smallLong = 42L;
+ static long smallLongShlOne = 84L;
+ static long smallLongShrOne = 21L;
+ static long smallLongUShrOne = 21L;
+ static long longLong = 123456789123456789L;
+ static long longLongShlOne = 246913578246913578L;
+ static long longLongShrOne = 61728394561728394L;
+ static long longLongUShrOne = 61728394561728394L;
+
+ static long negativeSmallLong = -42L;
+ static long negativeSmallLongShlOne = -84L;
+ static long negativeSmallLongShrOne = -21L;
+ static long negativeSmallLongUShrOne = 9223372036854775787L;
+ static long negativeLongLong = -123456789123456789L;
+ static long negativeLongLongShlOne = -246913578246913578L;
+ static long negativeLongLongShrOne = -61728394561728395L;
+ static long negativeLongLongUShrOne = 9161643642293047413L;
+
+ private static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new Error("Expected " + expected + ", got " + actual);
+ }
+ }
+
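+ // The shift methods live in Test.smali so that shl/shr/ushr-long instructions with constant
+ // shift amounts of 0 and 1 reach the compiler exactly as written; they are invoked here via
+ // reflection on the smali-defined Test class.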
+ public static void main(String[] args) throws Exception {
+ Class<?> c = Class.forName("Test");
+ Method m = c.getMethod("shlZero", long.class);
+ assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+ assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+ m = c.getMethod("shrZero", long.class);
+ assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+ assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+ m = c.getMethod("ushrZero", long.class);
+ assertEquals(smallLong, (Long)m.invoke(null, smallLong));
+ assertEquals(longLong, (Long)m.invoke(null, longLong));
+
+ m = c.getMethod("shlOne", long.class);
+ assertEquals(smallLongShlOne, (Long)m.invoke(null, smallLong));
+ assertEquals(longLongShlOne, (Long)m.invoke(null, longLong));
+
+ m = c.getMethod("shrOne", long.class);
+ assertEquals(smallLongShrOne, (Long)m.invoke(null, smallLong));
+ assertEquals(longLongShrOne, (Long)m.invoke(null, longLong));
+
+ m = c.getMethod("ushrOne", long.class);
+ assertEquals(smallLongUShrOne, (Long)m.invoke(null, smallLong));
+ assertEquals(longLongUShrOne, (Long)m.invoke(null, longLong));
+
+ // Test with negative numbers.
+
+ m = c.getMethod("shlZero", long.class);
+ assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+ m = c.getMethod("shrZero", long.class);
+ assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+ m = c.getMethod("ushrZero", long.class);
+ assertEquals(negativeSmallLong, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLong, (Long)m.invoke(null, negativeLongLong));
+
+ m = c.getMethod("shlOne", long.class);
+ assertEquals(negativeSmallLongShlOne, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLongShlOne, (Long)m.invoke(null, negativeLongLong));
+
+ m = c.getMethod("shrOne", long.class);
+ assertEquals(negativeSmallLongShrOne, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLongShrOne, (Long)m.invoke(null, negativeLongLong));
+
+ m = c.getMethod("ushrOne", long.class);
+ assertEquals(negativeSmallLongUShrOne, (Long)m.invoke(null, negativeSmallLong));
+ assertEquals(negativeLongLongUShrOne, (Long)m.invoke(null, negativeLongLong));
+ }
+}
diff --git a/test/669-moveable-string-class-equals/expected.txt b/test/686-get-this/expected.txt
similarity index 100%
rename from test/669-moveable-string-class-equals/expected.txt
rename to test/686-get-this/expected.txt
diff --git a/test/686-get-this/info.txt b/test/686-get-this/info.txt
new file mode 100644
index 0000000..7227bad
--- /dev/null
+++ b/test/686-get-this/info.txt
@@ -0,0 +1,2 @@
+Test that we can successfully call StackVisitor.GetThis() even when
+'this' gets overwritten.
diff --git a/test/686-get-this/smali/Test.smali b/test/686-get-this/smali/Test.smali
new file mode 100644
index 0000000..533f607
--- /dev/null
+++ b/test/686-get-this/smali/Test.smali
@@ -0,0 +1,45 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTest;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ const/4 v0, 0x1
+ sput v0, LTest;->field:I
+ return-void
+.end method
+
+
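+# Both test methods overwrite p0 ('this') -- testEmpty with the constant 1 and testPrimitive
+# with a static field load -- before calling back into Main to fetch the caller's 'this'.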
+.method public testEmpty()V
+ .registers 2
+ const/4 p0, 0x1
+ invoke-static {}, LMain;->getThisOfCaller()Ljava/lang/Object;
+ move-result-object v0
+ sput-object v0, LMain;->field:Ljava/lang/Object;
+ return-void
+.end method
+
+.method public testPrimitive()I
+ .registers 2
+ sget p0, LTest;->field:I
+ invoke-static {}, LMain;->getThisOfCaller()Ljava/lang/Object;
+ move-result-object v0
+ sput-object v0, LMain;->field:Ljava/lang/Object;
+ return p0
+.end method
+
+.field static public field:I
diff --git a/test/686-get-this/src/Main.java b/test/686-get-this/src/Main.java
new file mode 100644
index 0000000..4ea5301
--- /dev/null
+++ b/test/686-get-this/src/Main.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ Class<?> c = Class.forName("Test");
+ ensureJitCompiled(c, "testEmpty");
+ ensureJitCompiled(c, "testPrimitive");
+
+ Method m = c.getMethod("testEmpty");
+ m.invoke(c.newInstance());
+ if (field != null) {
+ throw new Error("Expected null");
+ }
+
+ m = c.getMethod("testPrimitive");
+ int a = (Integer)m.invoke(c.newInstance());
+ if (a != 1) {
+ throw new Error("Expected 1, got " + a);
+ }
+ if (field != null) {
+ throw new Error("Expected null");
+ }
+ }
+
+ public static Object field;
+
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+ public static native Object getThisOfCaller();
+}
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
index 9f4caec..af18193 100644
--- a/test/706-checker-scheduler/src/Main.java
+++ b/test/706-checker-scheduler/src/Main.java
@@ -292,8 +292,7 @@
/// CHECK: <<ArraySet1:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: Add loop:<<Loop>> outer_loop:none
- /// CHECK: <<Addr2:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr2>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: Add loop:<<Loop>> outer_loop:none
/// CHECK-START-ARM64: void Main.CrossOverLoop(int[], int[]) scheduler (after)
@@ -303,13 +302,12 @@
/// CHECK: <<NullA:l\d+>> NullCheck [<<ParamA>>] loop:none
/// CHECK: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK: Add loop:<<Loop>> outer_loop:none
/// CHECK: <<Addr1:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: Add [<<ArrayGet1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: <<ArraySet1:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: <<ArrayGet2:i\d+>> ArrayGet [<<NullB>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: Add loop:<<Loop>> outer_loop:none
- /// CHECK: <<Addr2:i\d+>> IntermediateAddress [<<NullA>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr2>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK: <<ArraySet2:v\d+>> ArraySet [<<Addr1>>,{{i\d+}},{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK: Add loop:<<Loop>> outer_loop:none
private static void CrossOverLoop(int a[], int b[]) {
b[20] = 99;
diff --git a/test/709-checker-varhandles/src/Main.java b/test/709-checker-varhandles/src/Main.java
index 46aaa38..d0ea834 100644
--- a/test/709-checker-varhandles/src/Main.java
+++ b/test/709-checker-varhandles/src/Main.java
@@ -28,7 +28,7 @@
// Fences (native).
//
- /// CHECK-START: void Main.fullFence() intrinsics_recognition (after)
+ /// CHECK-START: void Main.fullFence() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleFullFence
//
/// CHECK-START: void Main.fullFence() instruction_simplifier (after)
@@ -40,7 +40,7 @@
VarHandle.fullFence();
}
- /// CHECK-START: void Main.acquireFence() intrinsics_recognition (after)
+ /// CHECK-START: void Main.acquireFence() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleAcquireFence
//
/// CHECK-START: void Main.acquireFence() instruction_simplifier (after)
@@ -52,7 +52,7 @@
VarHandle.acquireFence();
}
- /// CHECK-START: void Main.releaseFence() intrinsics_recognition (after)
+ /// CHECK-START: void Main.releaseFence() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleReleaseFence
//
/// CHECK-START: void Main.releaseFence() instruction_simplifier (after)
@@ -64,7 +64,7 @@
VarHandle.releaseFence();
}
- /// CHECK-START: void Main.loadLoadFence() intrinsics_recognition (after)
+ /// CHECK-START: void Main.loadLoadFence() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleLoadLoadFence
//
/// CHECK-START: void Main.loadLoadFence() instruction_simplifier (after)
@@ -76,7 +76,7 @@
VarHandle.loadLoadFence();
}
- /// CHECK-START: void Main.storeStoreFence() intrinsics_recognition (after)
+ /// CHECK-START: void Main.storeStoreFence() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect intrinsic:VarHandleStoreStoreFence
//
/// CHECK-START: void Main.storeStoreFence() instruction_simplifier (after)
diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc
index 2a06a7b..521f9a6 100644
--- a/test/906-iterate-heap/iterate_heap.cc
+++ b/test/906-iterate-heap/iterate_heap.cc
@@ -418,5 +418,21 @@
return (status & JVMTI_CLASS_STATUS_INITIALIZED) != 0;
}
+extern "C" JNIEXPORT jint JNICALL Java_art_Test906_iterateOverInstancesCount(
+ JNIEnv* env, jclass, jclass target) {
+ jint cnt = 0;
+ auto count_func = [](jlong, jlong, jlong*, void* user_data) -> jvmtiIterationControl {
+ *reinterpret_cast<jint*>(user_data) += 1;
+ return JVMTI_ITERATION_CONTINUE;
+ };
+ JvmtiErrorToException(env,
+ jvmti_env,
+ jvmti_env->IterateOverInstancesOfClass(target,
+ JVMTI_HEAP_OBJECT_EITHER,
+ count_func,
+ &cnt));
+ return cnt;
+}
+
} // namespace Test906IterateHeap
} // namespace art
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
index be9663a..190f36f 100644
--- a/test/906-iterate-heap/src/art/Test906.java
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -18,6 +18,7 @@
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Random;
public class Test906 {
public static void run() throws Exception {
@@ -69,6 +70,40 @@
throw lastThrow;
}
+ private static Object[] GenTs(Class<?> k) throws Exception {
+ Object[] ret = new Object[new Random().nextInt(100) + 10];
+ for (int i = 0; i < ret.length; i++) {
+ ret[i] = k.newInstance();
+ }
+ return ret;
+ }
+
+ private static void checkEq(int a, int b) {
+ if (a != b) {
+ Error e = new Error("Failed: Expected equal " + a + " and " + b);
+ System.out.println(e);
+ e.printStackTrace(System.out);
+ }
+ }
+
+ public static class Foo {}
+ public static class Bar extends Foo {}
+ public static class Baz extends Bar {}
+ public static class Alpha extends Bar {}
+ public static class MISSING extends Baz {}
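+ // IterateOverInstancesOfClass also counts instances of subclasses, so the count for Bar
+ // below includes the Baz and Alpha instances, while MISSING (never instantiated) reports 0.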
+ private static void testIterateOverInstances() throws Exception {
+ Object[] foos = GenTs(Foo.class);
+ Object[] bars = GenTs(Bar.class);
+ Object[] bazs = GenTs(Baz.class);
+ Object[] alphas = GenTs(Alpha.class);
+ checkEq(0, iterateOverInstancesCount(MISSING.class));
+ checkEq(alphas.length, iterateOverInstancesCount(Alpha.class));
+ checkEq(bazs.length, iterateOverInstancesCount(Baz.class));
+ checkEq(bazs.length + alphas.length + bars.length, iterateOverInstancesCount(Bar.class));
+ checkEq(bazs.length + alphas.length + bars.length + foos.length,
+ iterateOverInstancesCount(Foo.class));
+ }
+
public static void doTest() throws Exception {
A a = new A();
B b = new B();
@@ -86,6 +121,8 @@
testHeapCount();
+ testIterateOverInstances();
+
long classTags[] = new long[100];
long sizes[] = new long[100];
long tags[] = new long[100];
@@ -308,6 +345,8 @@
return Main.getTag(o);
}
+ private static native int iterateOverInstancesCount(Class<?> klass);
+
private static native boolean checkInitialized(Class<?> klass);
private static native int iterateThroughHeapCount(int heapFilter,
Class<?> klassFilter, int stopAfter);
diff --git a/test/966-default-conflict/expected.txt b/test/966-default-conflict/expected.txt
index fad2c25..bbd733c 100644
--- a/test/966-default-conflict/expected.txt
+++ b/test/966-default-conflict/expected.txt
@@ -1,3 +1,4 @@
+JNI_OnLoad called
Create Main instance
Calling functions on concrete Main
Calling non-conflicting function on Main
diff --git a/test/966-default-conflict/src/Main.java b/test/966-default-conflict/src/Main.java
index ce8cb47..f466715 100644
--- a/test/966-default-conflict/src/Main.java
+++ b/test/966-default-conflict/src/Main.java
@@ -15,6 +15,13 @@
*/
class Main implements Iface, Iface2 {
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ // Ensure we JIT compile the methods to test CHA behavior with default
+ // methods.
+ ensureJitCompiled(Main.class, "callMain");
+ ensureJitCompiled(Main.class, "callIface");
+ ensureJitCompiled(Main.class, "callIface2");
+
System.out.println("Create Main instance");
Main m = new Main();
System.out.println("Calling functions on concrete Main");
@@ -68,4 +75,6 @@
}
return;
}
+
+ private static native void ensureJitCompiled(Class<?> cls, String method_name);
}
diff --git a/test/979-const-method-handle/expected.txt b/test/979-const-method-handle/expected.txt
index bbaaedb..8531709 100644
--- a/test/979-const-method-handle/expected.txt
+++ b/test/979-const-method-handle/expected.txt
@@ -7,3 +7,11 @@
2.718281828459045
repeatConstMethodHandle()
Attempting to set Math.E raised IAE
+Quack
+Moo
+Woof
+Test
+Getting field in TestTokenizer raised WMTE (woohoo!)
+Stack: tos was 7
+Stack: capacity was 10
+Stack: capacity is 2
diff --git a/test/979-const-method-handle/src/Main.java b/test/979-const-method-handle/src/Main.java
index 427ca7a..5368a22 100644
--- a/test/979-const-method-handle/src/Main.java
+++ b/test/979-const-method-handle/src/Main.java
@@ -18,6 +18,11 @@
import annotations.ConstantMethodType;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
+import java.lang.invoke.WrongMethodTypeException;
+
+import java.io.StreamTokenizer;
+import java.io.StringReader;
+import java.util.Stack;
class Main {
/**
@@ -45,6 +50,12 @@
private int field;
}
+ private static class TestTokenizer extends StreamTokenizer {
+ public TestTokenizer(String message) {
+ super(new StringReader(message));
+ }
+ }
+
@ConstantMethodType(
returnType = String.class,
parameterTypes = {int.class, Integer.class, System.class})
@@ -136,6 +147,48 @@
return null;
}
+ @ConstantMethodHandle(
+ kind = ConstantMethodHandle.INSTANCE_GET,
+ owner = "java/io/StreamTokenizer",
+ fieldOrMethodName = "sval",
+ descriptor = "Ljava/lang/String;")
+ private static MethodHandle getSval() {
+ unreachable();
+ return null;
+ }
+
+ // This constant-method-handle references a private instance field. If
+ // referenced in bytecode it raises IAE at load time.
+ @ConstantMethodHandle(
+ kind = ConstantMethodHandle.INSTANCE_PUT,
+ owner = "java/io/StreamTokenizer",
+ fieldOrMethodName = "peekc",
+ descriptor = "I")
+ private static MethodHandle putPeekc() {
+ unreachable();
+ return null;
+ }
+
+ @ConstantMethodHandle(
+ kind = ConstantMethodHandle.INVOKE_VIRTUAL,
+ owner = "java/util/Stack",
+ fieldOrMethodName = "pop",
+ descriptor = "()Ljava/lang/Object;")
+ private static MethodHandle stackPop() {
+ unreachable();
+ return null;
+ }
+
+ @ConstantMethodHandle(
+ kind = ConstantMethodHandle.INVOKE_VIRTUAL,
+ owner = "java/util/Stack",
+ fieldOrMethodName = "trimToSize",
+ descriptor = "()V")
+ private static MethodHandle stackTrim() {
+ unreachable();
+ return null;
+ }
+
private static void repeatConstMethodHandle() throws Throwable {
System.out.println("repeatConstMethodHandle()");
String[] values = {"A", "B", "C"};
@@ -166,5 +219,29 @@
} catch (IllegalAccessError expected) {
System.out.println("Attempting to set Math.E raised IAE");
}
+
+ StreamTokenizer st = new StreamTokenizer(new StringReader("Quack Moo Woof"));
+ while (st.nextToken() != StreamTokenizer.TT_EOF) {
+ System.out.println((String) getSval().invokeExact(st));
+ }
+
+ TestTokenizer tt = new TestTokenizer("Test message 123");
+ tt.nextToken();
+ System.out.println((String) getSval().invoke(tt));
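+ // getSval() has type (StreamTokenizer)String. invoke() adapts the TestTokenizer argument,
+ // but invokeExact() requires the call-site type to match the handle type exactly, so the
+ // call below is expected to throw WrongMethodTypeException.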
+ try {
+ System.out.println((String) getSval().invokeExact(tt));
+ } catch (WrongMethodTypeException wmte) {
+ System.out.println("Getting field in TestTokenizer raised WMTE (woohoo!)");
+ }
+
+ Stack stack = new Stack();
+ stack.push(Integer.valueOf(3));
+ stack.push(Integer.valueOf(5));
+ stack.push(Integer.valueOf(7));
+ Object tos = stackPop().invokeExact(stack);
+ System.out.println("Stack: tos was " + tos);
+ System.out.println("Stack: capacity was " + stack.capacity());
+ stackTrim().invokeExact(stack);
+ System.out.println("Stack: capacity is " + stack.capacity());
}
}
diff --git a/test/980-redefine-object/redef_object.cc b/test/980-redefine-object/redef_object.cc
index b4d82ad..a8393dc 100644
--- a/test/980-redefine-object/redef_object.cc
+++ b/test/980-redefine-object/redef_object.cc
@@ -80,13 +80,13 @@
public:
explicit JvmtiAllocator(jvmtiEnv* jvmti) : jvmti_(jvmti) {}
- void* Allocate(size_t size) {
+ void* Allocate(size_t size) override {
unsigned char* res = nullptr;
jvmti_->Allocate(size, &res);
return res;
}
- void Free(void* ptr) {
+ void Free(void* ptr) override {
jvmti_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
}
diff --git a/test/Android.bp b/test/Android.bp
index e265651..8c1c1bf 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -291,6 +291,7 @@
"1946-list-descriptors/descriptors.cc",
"1950-unprepared-transform/unprepared_transform.cc",
"1951-monitor-enter-no-suspend/raw_monitor.cc",
+ "1953-pop-frame/pop_frame.cc",
],
// Use NDK-compatible headers for ctstiagent.
header_libs: [
@@ -320,6 +321,7 @@
"983-source-transform-verify/source_transform_art.cc",
"1940-ddms-ext/ddm_ext.cc",
"1944-sudden-exit/sudden_exit.cc",
+ // "1952-pop-frame-jit/pop_frame.cc",
],
static_libs: [
"libz",
@@ -417,10 +419,11 @@
art_cc_defaults {
name: "libtistress-static-defaults",
- defaults: ["libtistress-srcs"],
- static_libs: art_static_dependencies + [
- "slicer",
+ defaults: [
+ "libtistress-srcs",
+ "libart_static_defaults",
],
+ static_libs: ["slicer"],
}
art_cc_test_library {
@@ -453,7 +456,6 @@
"004-UnsafeTest/unsafe_test.cc",
"044-proxy/native_proxy.cc",
"051-thread/thread_test.cc",
- "117-nopatchoat/nopatchoat.cc",
"1337-gc-coverage/gc_coverage.cc",
"136-daemon-jni-shutdown/daemon_jni_shutdown.cc",
"137-cfi/cfi.cc",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ffaa2cd..64c1d4f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -27,32 +27,32 @@
# We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
# only once).
-TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES := $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
# Also need libartagent.
-TEST_ART_TARGET_SYNC_DEPS += libartagent-target libartagentd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libartagent-target libartagentd-target
# Also need libtiagent.
-TEST_ART_TARGET_SYNC_DEPS += libtiagent-target libtiagentd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libtiagent-target libtiagentd-target
# Also need libtistress.
-TEST_ART_TARGET_SYNC_DEPS += libtistress-target libtistressd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libtistress-target libtistressd-target
# Also need libarttest.
-TEST_ART_TARGET_SYNC_DEPS += libarttest-target libarttestd-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libarttest-target libarttestd-target
# Also need libnativebridgetest.
-TEST_ART_TARGET_SYNC_DEPS += libnativebridgetest-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libnativebridgetest-target
# Also need libopenjdkjvmti.
-TEST_ART_TARGET_SYNC_DEPS += libopenjdkjvmti-target libopenjdkjvmtid-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
-TEST_ART_TARGET_SYNC_DEPS += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
# All tests require the host executables. The tests also depend on the core images, but on
# specific version depending on the compiler.
diff --git a/test/StackWalk2/StackWalk2.java b/test/StackWalk2/StackWalk2.java
deleted file mode 100644
index 5e7b22c..0000000
--- a/test/StackWalk2/StackWalk2.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class StackWalk2 {
- // use v1 for this
-
- String str = new String(); // use v0 for str in <init>
-
- int f() {
- g(1); // use v0 for 1, v1 for this
- g(2); // use v0 for 2, v1 for this
- strTest(); // use v1 for this
- return 0;
- }
-
- void g(int num_calls) throws RuntimeException {
- if (num_calls == 1) { // use v0 for 1, v3 for num_calls
- System.logI("1st call"); // use v0 for PrintStream, v1 for "1st call"
- refmap2(24); // use v0 for 24, v2 for this
- } else if (num_calls == 2) { // use v0 for 2, v3 for num_calls
- System.logI("2nd call"); // use v0 for PrintStream, v1 for "2nd call"
- refmap2(25); // use v0 for 24, v2 for this
- }
- throw new RuntimeException(); // use v0 for new RuntimeException
- }
-
- void strTest() {
- System.logI(str); // use v1 for PrintStream, v2, v3 for str
- str = null; // use v1 for null, v3 for str
- str = new String("ya"); // use v2 for "ya", v1 for new String
- String s = str; // use v0, v1, v3
- System.logI(str); // use v1 for PrintStream, v2, v3 for str
- System.logI(s); // use v1 for PrintStream, v0 for s
- s = null; // use v0
- System.logI(s); // use v1 for PrintStream, v0 for s
- }
-
- native int refmap2(int x);
-
- public static void main(String[] args) {
- System.loadLibrary(args[0]);
- StackWalk2 st = new StackWalk2();
- st.f();
- }
-}
diff --git a/test/StringLiterals/StringLiterals.java b/test/StringLiterals/StringLiterals.java
new file mode 100644
index 0000000..8dee666
--- /dev/null
+++ b/test/StringLiterals/StringLiterals.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class StringLiterals {
+ void startUpMethod() {
+ String resource = "abcd.apk";
+ System.out.println("Starting up");
+ System.out.println("Loading " + resource);
+ }
+
+ void otherMethod() {
+ System.out.println("Unexpected error");
+ System.out.println("Shutting down!");
+ }
+}
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index da79164..65127fc 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -21,11 +21,13 @@
#include "art_method-inl.h"
#include "base/enums.h"
+#include "common_throws.h"
#include "dex/dex_file-inl.h"
#include "instrumentation.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profiling_info.h"
+#include "jni/jni_internal.h"
#include "mirror/class-inl.h"
#include "nativehelper/ScopedUtfChars.h"
#include "oat_file.h"
@@ -195,6 +197,56 @@
return jit->GetCodeCache()->ContainsMethod(method);
}
+static void ForceJitCompiled(Thread* self, ArtMethod* method) REQUIRES(!Locks::mutator_lock_) {
+ {
+ ScopedObjectAccess soa(self);
+ if (method->IsNative()) {
+ std::string msg(method->PrettyMethod());
+ msg += ": is native";
+ ThrowIllegalArgumentException(msg.c_str());
+ return;
+ } else if (!Runtime::Current()->GetRuntimeCallbacks()->IsMethodSafeToJit(method)) {
+ std::string msg(method->PrettyMethod());
+ msg += ": is not safe to jit!";
+ ThrowIllegalStateException(msg.c_str());
+ return;
+ }
+ }
+ jit::Jit* jit = GetJitIfEnabled();
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ // Update the code cache to make sure the JIT code does not get deleted.
+ // Note: this will apply to all JIT compilations.
+ code_cache->SetGarbageCollectCode(false);
+ while (true) {
+ if (code_cache->WillExecuteJitCode(method)) {
+ break;
+ } else {
+ // Sleep to yield to the compiler thread.
+ usleep(1000);
+ ScopedObjectAccess soa(self);
+ // Make sure there is a profiling info, required by the compiler.
+ ProfilingInfo::Create(self, method, /* retry_allocation */ true);
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(method, self, /* osr */ false);
+ }
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureMethodJitCompiled(JNIEnv*, jclass, jobject meth) {
+ jit::Jit* jit = GetJitIfEnabled();
+ if (jit == nullptr) {
+ return;
+ }
+
+ Thread* self = Thread::Current();
+ ArtMethod* method;
+ {
+ ScopedObjectAccess soa(self);
+ method = ArtMethod::FromReflectedMethod(soa, meth);
+ }
+ ForceJitCompiled(self, method);
+}
+
extern "C" JNIEXPORT void JNICALL Java_Main_ensureJitCompiled(JNIEnv* env,
jclass,
jclass cls,
@@ -219,24 +271,7 @@
}
DCHECK(method != nullptr) << "Unable to find method called " << chars.c_str();
}
-
- jit::JitCodeCache* code_cache = jit->GetCodeCache();
- // Update the code cache to make sure the JIT code does not get deleted.
- // Note: this will apply to all JIT compilations.
- code_cache->SetGarbageCollectCode(false);
- while (true) {
- if (code_cache->WillExecuteJitCode(method)) {
- break;
- } else {
- // Sleep to yield to the compiler thread.
- usleep(1000);
- ScopedObjectAccess soa(self);
- // Make sure there is a profiling info, required by the compiler.
- ProfilingInfo::Create(self, method, /* retry_allocation */ true);
- // Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, self, /* osr */ false);
- }
- }
+ ForceJitCompiled(self, method);
}
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasSingleImplementation(JNIEnv* env,
@@ -292,15 +327,6 @@
code_cache->GetProfiledMethods(unused_locations, unused_vector);
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isClassMoveable(JNIEnv*,
- jclass,
- jclass cls) {
- Runtime* runtime = Runtime::Current();
- ScopedObjectAccess soa(Thread::Current());
- ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
- return runtime->GetHeap()->IsMovableObject(klass);
-}
-
extern "C" JNIEXPORT void JNICALL Java_Main_waitForCompilation(JNIEnv*, jclass) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
@@ -322,4 +348,9 @@
}
}
+extern "C" JNIEXPORT jint JNICALL Java_Main_getJitThreshold(JNIEnv*, jclass) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ return (jit != nullptr) ? jit->HotMethodThreshold() : 0;
+}
+
} // namespace art
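For context, the new Java_Main_ensureMethodJitCompiled and Java_Main_getJitThreshold entry points are intended to be bound from a run-test's Main class. A minimal Java-side sketch (the class body and the test call below are illustrative, not part of this change):

    import java.lang.reflect.Method;

    public class Main {
      // Ask the runtime to JIT-compile the given reflected method before it is exercised.
      public static native void ensureMethodJitCompiled(Method method);

      // Query the JIT hot-method threshold; 0 means the JIT is disabled.
      public static native int getJitThreshold();

      public static void main(String[] args) throws Exception {
        System.loadLibrary(args[0]);  // run-test convention: test library name is passed in args[0]
        Method m = Main.class.getDeclaredMethod("main", String[].class);
        ensureMethodJitCompiled(m);
        System.out.println("JIT threshold: " + getJitThreshold());
      }
    }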
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index d74d2ef..581aa74 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -196,4 +196,24 @@
}
}
+struct GetCallingFrameVisitor : public StackVisitor {
+ GetCallingFrameVisitor(Thread* thread, Context* context)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
+
+ bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
+ // Discard stubs and Main.getThisOfCaller.
+ return GetMethod() == nullptr || GetMethod()->IsNative();
+ }
+};
+
+extern "C" JNIEXPORT jobject JNICALL Java_Main_getThisOfCaller(
+ JNIEnv* env, jclass cls ATTRIBUTE_UNUSED) {
+ ScopedObjectAccess soa(env);
+ std::unique_ptr<art::Context> context(art::Context::Create());
+ GetCallingFrameVisitor visitor(soa.Self(), context.get());
+ visitor.WalkStack();
+ return soa.AddLocalReference<jobject>(visitor.GetThisObject());
+}
+
} // namespace art
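The new Java_Main_getThisOfCaller helper walks past native and stub frames to return the receiver of the calling Java frame. A Java-side sketch of how a run-test could declare and exercise it (illustrative only):

    public class Main {
      // Returns the 'this' reference of the Java method that called into this native,
      // skipping runtime stubs (see GetCallingFrameVisitor above).
      public static native Object getThisOfCaller();

      public static void main(String[] args) {
        System.loadLibrary(args[0]);  // run-test convention
        new Main().check();
      }

      void check() {
        // From an instance method, the caller's 'this' is this Main instance.
        System.out.println(getThisOfCaller() == this);
      }
    }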
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index bd58ae3..900b1d7 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -17,7 +17,7 @@
COMPILE_FLAGS=""
DALVIKVM="dalvikvm32"
DEBUGGER="n"
-WITH_AGENT=""
+WITH_AGENT=()
DEBUGGER_AGENT=""
WRAP_DEBUGGER_AGENT="n"
DEV_MODE="n"
@@ -40,7 +40,6 @@
TEST_DIRECTORY="nativetest"
MAIN=""
OPTIMIZE="y"
-PATCHOAT=""
PREBUILD="y"
QUIET="n"
RELOCATE="n"
@@ -59,7 +58,6 @@
VERIFY="y" # y=yes,n=no,s=softfail
ZYGOTE=""
DEX_VERIFY=""
-USE_PATCHOAT="y"
INSTRUCTION_SET_FEATURES=""
ARGS=""
EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
@@ -166,10 +164,6 @@
shift
BOOT_IMAGE="$1"
shift
- elif [ "x$1" = "x--no-patchoat" ]; then
- PATCHOAT="-Xpatchoat:${FALSE_BIN}"
- USE_PATCHOAT="n"
- shift
elif [ "x$1" = "x--relocate" ]; then
RELOCATE="y"
shift
@@ -232,7 +226,7 @@
elif [ "x$1" = "x--with-agent" ]; then
shift
USE_JVMTI="y"
- WITH_AGENT="$1"
+ WITH_AGENT+=("$1")
shift
elif [ "x$1" = "x--debug-wrap-agent" ]; then
WRAP_DEBUGGER_AGENT="y"
@@ -271,6 +265,9 @@
elif [ "x$1" = "x--jit" ]; then
JIT="y"
shift
+ elif [ "x$1" = "x--baseline" ]; then
+ FLAGS="${FLAGS} -Xcompiler-option --baseline"
+ shift
elif [ "x$1" = "x--jvm" ]; then
USE_JVM="y"
shift
@@ -322,10 +319,6 @@
TEST_DIRECTORY="nativetest64"
ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
shift
- elif [ "x$1" = "x--pic-test" ]; then
- FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
- COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
- shift
elif [ "x$1" = "x--experimental" ]; then
if [ "$#" -lt 2 ]; then
echo "missing --experimental option" 1>&2
@@ -454,9 +447,9 @@
DEBUGGER_OPTS="-agentpath:${AGENTPATH}=transport=dt_socket,address=$PORT,server=y,suspend=y"
fi
-if [ "x$WITH_AGENT" != "x" ]; then
- FLAGS="${FLAGS} -agentpath:${WITH_AGENT}"
-fi
+for agent in "${WITH_AGENT[@]}"; do
+ FLAGS="${FLAGS} -agentpath:${agent}"
+done
if [ "$USE_JVMTI" = "y" ]; then
if [ "$USE_JVM" = "n" ]; then
@@ -774,7 +767,6 @@
$FLAGS \
$DEX_VERIFY \
-XXlib:$LIB \
- $PATCHOAT \
$DEX2OAT \
$DALVIKVM_ISA_FEATURES_ARGS \
$ZYGOTE \
@@ -803,15 +795,11 @@
fi
RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}detect_leaks=0"
-# For running, we must turn off logging when dex2oat or patchoat are missing. Otherwise we use
+# For running, we must turn off logging when dex2oat is missing. Otherwise we use
# the same defaults as for prebuilt: everything when --dev, otherwise errors and above only.
if [ "$EXTERNAL_LOG_TAGS" = "n" ]; then
if [ "$DEV_MODE" = "y" ]; then
export ANDROID_LOG_TAGS='*:d'
- elif [ "$USE_PATCHOAT" = "n" ]; then
- # All tests would log the error of failing dex2oat/patchoat. Be silent here and only
- # log fatal events.
- export ANDROID_LOG_TAGS='*:s'
elif [ "$HAVE_IMAGE" = "n" ]; then
# All tests would log the error of missing image. Be silent here and only log fatal
# events.
@@ -988,6 +976,7 @@
if [ "$USE_GDB" = "y" ]; then
# When running under gdb, we cannot do piping and grepping...
+ echo "Run 'gdbclient.py -p <pid printed below>' to debug."
$cmdline "$@"
else
if [ "$TIME_OUT" != "gdb" ]; then
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 0a179c7..f4f45ce 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -13,7 +13,17 @@
},
{
"tests": "080-oom-fragmentation",
- "description": "Disable 080-oom-fragmentation due to flakes.",
+ "description": ["Disable 080-oom-fragmentation for GSS GC due to lack of",
+ "support for allocations larger than 32MB."],
+ "env_vars": {"ART_DEFAULT_GC_TYPE": "GSS"},
+ "bug": "http://b/33795328"
+ },
+ {
+ "tests": "080-oom-fragmentation",
+ "description": ["Disable 080-oom-fragmentation for CC collector in debug mode",
+ "because of potential fragmentation caused by the region space's",
+ "cyclic region allocation (which is enabled in debug mode)."],
+ "variant": "debug",
"bug": "http://b/33795328"
},
{
@@ -60,23 +70,20 @@
"doesn't (and isn't meant to) work with --prebuild."]
},
{
- "tests": ["117-nopatchoat",
- "147-stripped-dex-fallback",
+ "tests": ["147-stripped-dex-fallback",
"608-checker-unresolved-lse"],
"variant": "no-prebuild"
},
{
- "tests": ["117-nopatchoat",
- "118-noimage-dex2oat",
- "119-noimage-patchoat"],
+ "tests": ["118-noimage-dex2oat"],
"variant": "no-relocate",
- "description": ["117-nopatchoat is not broken per-se it just doesn't",
- "work (and isn't meant to) without --prebuild",
+        "description": ["118-noimage-dex2oat is not broken per se; it just ",
+                        "doesn't work (and isn't meant to) without --prebuild ",
"--relocate"]
},
{
"tests" : "629-vdex-speed",
- "variant": "interp-ac | interpreter | jit | relocate-npatchoat",
+ "variant": "interp-ac | interpreter | jit",
"description": "629 requires compilation."
},
{
@@ -163,20 +170,18 @@
},
{
"tests": "147-stripped-dex-fallback",
- "variant": "no-image | relocate-npatchoat",
+ "variant": "no-image",
"description": ["147-stripped-dex-fallback is disabled because it",
"requires --prebuild."]
},
{
"tests": ["116-nodex2oat",
- "117-nopatchoat",
"118-noimage-dex2oat",
- "119-noimage-patchoat",
"137-cfi",
"138-duplicate-classes-check2"],
- "variant": "no-image | relocate-npatchoat",
+ "variant": "no-image",
"description": ["All these tests check that we have sane behavior if we",
- "don't have a patchoat or dex2oat. Therefore we",
+ "don't have a dex2oat. Therefore we",
"shouldn't run them in situations where we actually",
"don't have these since they explicitly test for them.",
"These all also assume we have an image."]
@@ -286,10 +291,11 @@
{
"tests": ["454-get-vreg",
"457-regs",
- "602-deoptimizeable"],
+ "602-deoptimizeable",
+ "685-deoptimizeable"],
"description": ["Tests that should fail when the optimizing compiler ",
"compiles them non-debuggable."],
- "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable | speed-profile & ndebuggable | jit & ndebuggable"
+ "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable | speed-profile & ndebuggable | jit & ndebuggable | jit-on-first-use & ndebuggable"
},
{
"tests": "596-app-images",
@@ -331,9 +337,7 @@
{
"tests": ["018-stack-overflow",
"116-nodex2oat",
- "117-nopatchoat",
"118-noimage-dex2oat",
- "119-noimage-patchoat",
"126-miranda-multidex",
"137-cfi"],
"description": "The test run dalvikvm more than once.",
@@ -578,6 +582,30 @@
},
{
"tests": [
+ "018-stack-overflow",
+ "107-int-math2"
+ ],
+ "description": [
+ "Insufficient stack guards for ASAN."
+ ],
+ "variant": "interp-ac & host",
+ "env_vars": {"SANITIZE_HOST": "address"},
+ "bug": "b/31098551"
+ },
+ {
+ "tests": [
+ "018-stack-overflow",
+ "107-int-math2"
+ ],
+ "description": [
+ "Insufficient stack guards for ASAN."
+ ],
+ "variant": "interp-ac & target",
+ "env_vars": {"SANITIZE_TARGET": "address"},
+ "bug": "b/31098551"
+ },
+ {
+ "tests": [
"059-finalizer-throw",
"074-gc-thrash",
"911-get-stack-trace",
@@ -673,9 +701,14 @@
"description": ["Tests that depend on input-vdex are not supported with compact dex"]
},
{
- "tests": "661-oat-writer-layout",
- "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
- "description": ["Test is designed to only check --compiler-filter=speed"]
+ "tests": ["661-oat-writer-layout"],
+ "variant": "interp-ac | interpreter | jit | jit-on-first-use | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
+ "description": ["Test is designed to only check --optimizing"]
+ },
+ {
+ "tests": ["004-StackWalk"],
+ "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress | debuggable",
+ "description": ["Test is designed to only check --optimizing"]
},
{
"tests": "674-HelloWorld-Dm",
@@ -731,9 +764,7 @@
"111-unresolvable-exception",
"115-native-bridge",
"116-nodex2oat",
- "117-nopatchoat",
"118-noimage-dex2oat",
- "119-noimage-patchoat",
"127-checker-secondarydex",
"129-ThreadGetId",
"130-hprof",
@@ -914,6 +945,9 @@
"675-checker-unverified-method",
"676-proxy-jit-at-first-use",
"676-resolve-field-type",
+ "685-deoptimizeable",
+ "685-shifts",
+ "686-get-this",
"706-checker-scheduler",
"707-checker-invalid-profile",
"714-invoke-custom-lambda-metafactory",
@@ -985,7 +1019,8 @@
"description": ["Failing on RI. Needs further investigating."]
},
{
- "tests": ["616-cha-unloading",
+ "tests": ["530-checker-peel-unroll",
+ "616-cha-unloading",
"674-hiddenapi",
"677-fsi2",
"678-quickening",
@@ -1010,7 +1045,7 @@
},
{
"tests": "677-fsi",
- "variant": "no-image | no-prebuild | relocate-npatchoat | jvm",
+ "variant": "no-image | no-prebuild | jvm",
"description": ["Test requires a successful dex2oat invocation"]
},
{
@@ -1053,5 +1088,31 @@
"tests": ["566-polymorphic-inlining"],
"variant": "jit & debuggable",
"description": ["We do not inline with debuggable."]
+ },
+ {
+ "tests": ["1955-pop-frame-jit-called", "1956-pop-frame-jit-calling"],
+ "variant": "jit-on-first-use",
+ "description": [
+ "These tests directly set -Xjitthreshold:1000 to prevent the jit from compiling any",
+        "extra methods. jit-on-first-use would disrupt this."
+ ]
+ },
+ {
+ "tests": ["135-MirandaDispatch"],
+ "variant": "interp-ac & 32 & host",
+ "env_vars": {"SANITIZE_HOST": "address"},
+ "bug": "b/112993554",
+ "description": ["Timeout with ASan and interp-ac on 32-bit host (x86)."]
+ },
+ {
+ "tests": ["454-get-vreg", "457-regs"],
+ "variant": "baseline",
+ "description": ["Tests are expected to fail with baseline."]
+ },
+ {
+ "tests": ["050-sync-test"],
+ "variant": "target & gcstress & debug",
+ "bug": "b/117597114",
+      "description": ["Looks timing-dependent."]
}
]
diff --git a/test/run-test b/test/run-test
index ef17302..229e201 100755
--- a/test/run-test
+++ b/test/run-test
@@ -76,9 +76,16 @@
export ANDROID_BUILD_TOP=$oldwd
fi
+# OUT_DIR defaults to out, and may be relative to $ANDROID_BUILD_TOP.
+# Convert it to an absolute path, since we cd into the tmp_dir to run the tests.
+export OUT_DIR=${OUT_DIR:-out}
+if [[ "$OUT_DIR" != /* ]]; then
+ export OUT_DIR=$ANDROID_BUILD_TOP/$OUT_DIR
+fi
+
# ANDROID_HOST_OUT is not set in a build environment.
if [ -z "$ANDROID_HOST_OUT" ]; then
- export ANDROID_HOST_OUT=${OUT_DIR:-$ANDROID_BUILD_TOP/out}/host/linux-x86
+ export ANDROID_HOST_OUT=${OUT_DIR}/host/linux-x86
fi
# Allow changing DESUGAR script to something else, or to disable it with DESUGAR=false.
@@ -148,15 +155,12 @@
strace="false"
always_clean="no"
never_clean="no"
-have_patchoat="yes"
have_image="yes"
-multi_image_suffix=""
android_root="/system"
bisection_search="no"
suspend_timeout="500000"
-# By default we will use optimizing.
-image_args=""
image_suffix=""
+run_optimizing="false"
while true; do
if [ "x$1" = "x--host" ]; then
@@ -180,7 +184,6 @@
target_mode="no"
DEX_LOCATION="$tmp_dir"
runtime="jvm"
- image_args=""
prebuild_mode="no"
NEED_DEX="false"
run_args="${run_args} --jvm"
@@ -194,18 +197,9 @@
lib="libdvm.so"
runtime="dalvik"
shift
- elif [ "x$1" = "x--no-patchoat" ]; then
- have_patchoat="no"
- shift
elif [ "x$1" = "x--no-image" ]; then
have_image="no"
shift
- elif [ "x$1" = "x--multi-image" ]; then
- multi_image_suffix="-multi"
- shift
- elif [ "x$1" = "x--pic-test" ]; then
- run_args="${run_args} --pic-test"
- shift
elif [ "x$1" = "x--relocate" ]; then
relocate="yes"
shift
@@ -313,17 +307,20 @@
image_suffix="-interpreter"
shift
elif [ "x$1" = "x--jit" ]; then
- image_args="--jit"
+ run_args="${run_args} --jit"
image_suffix="-interpreter"
shift
+ elif [ "x$1" = "x--baseline" ]; then
+ run_args="${run_args} --baseline"
+ shift
elif [ "x$1" = "x--optimizing" ]; then
- image_args="-Xcompiler-option --compiler-backend=Optimizing"
+ run_optimizing="true"
shift
elif [ "x$1" = "x--no-verify" ]; then
run_args="${run_args} --no-verify"
shift
elif [ "x$1" = "x--verify-soft-fail" ]; then
- image_args="--verify-soft-fail"
+ run_args="${run_args} --verify-soft-fail"
image_suffix="-interp-ac"
shift
elif [ "x$1" = "x--no-optimize" ]; then
@@ -445,7 +442,6 @@
# The DEX_LOCATION with the chroot prefix, if any.
chroot_dex_location="$chroot$DEX_LOCATION"
-run_args="${run_args} ${image_args}"
# Allocate file descriptor real_stderr and redirect it to the shell's error
# output (fd 2).
if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then
@@ -572,10 +568,6 @@
fi
fi
-if [ "$have_patchoat" = "no" ]; then
- run_args="${run_args} --no-patchoat"
-fi
-
if [ ! "$runtime" = "jvm" ]; then
run_args="${run_args} --lib $lib"
fi
@@ -591,12 +583,12 @@
elif [ "$runtime" = "art" ]; then
if [ "$target_mode" = "no" ]; then
guess_host_arch_name
- run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}${multi_image_suffix}.art"
+ run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}.art"
run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}:${ANDROID_HOST_OUT}/nativetest${suffix64}"
else
guess_target_arch_name
run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}"
- run_args="${run_args} --boot /data/art-test/core${image_suffix}${multi_image_suffix}.art"
+ run_args="${run_args} --boot /data/art-test/core${image_suffix}.art"
fi
if [ "$relocate" = "yes" ]; then
run_args="${run_args} --relocate"
@@ -631,11 +623,6 @@
usage="yes"
fi
-if [ "$bisection_search" = "yes" -a "$have_patchoat" = "no" ]; then
- err_echo "--bisection-search and --no-patchoat are mutually exclusive"
- usage="yes"
-fi
-
# TODO: Chroot-based bisection search is not supported yet (see below); implement it.
if [ "$bisection_search" = "yes" -a -n "$chroot" ]; then
err_echo "--chroot with --bisection-search is unsupported"
@@ -704,7 +691,6 @@
"If used, then the"
echo " other runtime options are ignored."
echo " --no-dex2oat Run as though dex2oat was failing."
- echo " --no-patchoat Run as though patchoat was failing."
echo " --prebuild Run dex2oat on the files before starting test. (default)"
echo " --no-prebuild Do not run dex2oat on the files before starting"
echo " the test."
@@ -741,9 +727,6 @@
echo " --dex2oat-swap Use a dex2oat swap file."
echo " --instruction-set-features [string]"
echo " Set instruction-set-features for compilation."
- echo " --multi-image Use a set of images compiled with dex2oat multi-image for"
- echo " the boot class path."
- echo " --pic-test Compile the test code position independent."
echo " --quiet Don't print anything except failure messages"
echo " --bisection-search Perform bisection bug search."
echo " --vdex Test using vdex as in input to dex2oat. Only works with --prebuild."
@@ -774,7 +757,7 @@
echo "${test_dir}: building..." 1>&2
rm -rf "$tmp_dir"
-cp -Rp "$test_dir" "$tmp_dir"
+cp -LRp "$test_dir" "$tmp_dir"
cd "$tmp_dir"
if [ '!' -r "$build" ]; then
@@ -804,7 +787,7 @@
# Tests named '<number>-checker-*' will also have their CFGs verified with
# Checker when compiled with Optimizing on host.
if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
- if [ "$runtime" = "art" -a "$image_suffix" = "" ]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$run_optimizing" = "true" ]; then
# In no-prebuild or no-image mode, the compiler only quickens so disable the checker.
if [ "$prebuild_mode" = "yes" -a "$have_image" = "yes" ]; then
run_checker="yes"
@@ -827,7 +810,7 @@
fi
fi
- run_args="${run_args} --testlib ${testlib}"
+run_args="${run_args} --testlib ${testlib}"
# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
# ART output to approximately 128MB. This should be more than sufficient
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index 3160079..14287b0 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -63,11 +63,12 @@
os.environ.update(custom_env)
if 'make' in target:
- build_command = 'make'
+ build_command = 'build/soong/soong_ui.bash --make-mode'
build_command += ' DX='
build_command += ' -j' + str(n_threads)
- build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + target.get('make')
+ if env.DIST_DIR:
+ build_command += ' dist'
sys.stdout.write(str(build_command) + '\n')
sys.stdout.flush()
if subprocess.call(build_command.split()):
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 84490bf..978e9cb3 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -1,23 +1,23 @@
target_config = {
-# Configuration syntax:
-#
-# Required keys: (Use one or more of these)
-# * golem - specify a golem machine-type to build, e.g. android-armv8
-# (uses art/tools/golem/build-target.sh)
-# * make - specify a make target to build, e.g. build-art-host
-# * run-test - runs the tests in art/test/ directory with testrunner.py,
-# specify a list of arguments to pass to testrunner.py
-#
-# Optional keys: (Use any of these)
-# * env - Add additional environment variable to the current environment.
-#
-# *** IMPORTANT ***:
-# This configuration is used by the android build server. Targets must not be renamed
-# or removed.
-#
+ # Configuration syntax:
+ #
+ # Required keys: (Use one or more of these)
+ # * golem - specify a golem machine-type to build, e.g. android-armv8
+ # (uses art/tools/golem/build-target.sh)
+ # * make - specify a make target to build, e.g. build-art-host
+ # * run-test - runs the tests in art/test/ directory with testrunner.py,
+ # specify a list of arguments to pass to testrunner.py
+ #
+ # Optional keys: (Use any of these)
+ # * env - Add additional environment variable to the current environment.
+ #
+ # *** IMPORTANT ***:
+ # This configuration is used by the android build server. Targets must not be renamed
+ # or removed.
+ #
-##########################################
+ ##########################################
# General ART configurations.
# Calls make and testrunner both.
@@ -47,61 +47,45 @@
'run-test' : ['--jit', '--debuggable', '--ndebuggable']
},
'art-jit-on-first-use' : {
- 'run-test' : ['--jit',
- '--runtime-option=-Xjitthreshold:0']
+ 'run-test' : ['--jit-on-first-use']
},
'art-pictest' : {
- 'run-test' : ['--pictest',
- '--optimizing']
+ # Deprecated config: All AOT-compiled code is PIC now.
+ 'run-test' : ['--optimizing']
},
'art-gcstress-gcverify': {
# Do not exercise '--interpreter', '--optimizing', nor '--jit' in this
- # configuration, as they are covered by the 'art-interpreter-gcstress',
- # 'art-optimizing-gcstress' and 'art-jit-gcstress' configurations below.
+ # configuration, as they are covered by the
+ # 'art-interpreter-gcstress-gcverify',
+ # 'art-optimizing-gcstress-gcverify' and 'art-jit-gcstress-gcverify'
+ # configurations below.
'run-test': ['--interp-ac',
'--speed-profile',
'--gcstress',
'--gcverify']
},
- # Rename this configuration as 'art-interpreter-gcstress-gcverify' (b/62611253).
- 'art-interpreter-gcstress' : {
+ 'art-interpreter-gcstress-gcverify' : {
'run-test' : ['--interpreter',
'--gcstress',
'--gcverify']
},
- # Rename this configuration as 'art-optimizing-gcstress-gcverify' (b/62611253).
- 'art-optimizing-gcstress' : {
+ 'art-optimizing-gcstress-gcverify' : {
'run-test' : ['--optimizing',
'--gcstress',
'--gcverify']
},
- # Rename this configuration as 'art-jit-gcstress-gcverify' (b/62611253).
- 'art-jit-gcstress' : {
+ 'art-jit-gcstress-gcverify' : {
'run-test' : ['--jit',
'--gcstress',
'--gcverify']
},
'art-jit-on-first-use-gcstress' : {
- 'run-test' : ['--jit',
- '--gcstress',
- '--runtime-option=-Xjitthreshold:0']
+ 'run-test' : ['--jit-on-first-use',
+ '--gcstress']
},
- # TODO: Rename or repurpose this configuration as
- # 'art-read-barrier-heap-poisoning' (b/62611253).
- 'art-read-barrier' : {
+ 'art-read-barrier-heap-poisoning' : {
'run-test': ['--interpreter',
- '--optimizing'],
- 'env' : {
- 'ART_HEAP_POISONING' : 'true'
- }
- },
- # TODO: Remove or disable this configuration, as it is now covered
- # by 'art-interpreter-gcstress' and 'art-optimizing-gcstress' --
- # except for heap poisoning, but that's fine (b/62611253).
- 'art-read-barrier-gcstress' : {
- 'run-test' : ['--interpreter',
- '--optimizing',
- '--gcstress'],
+ '--optimizing'],
'env' : {
'ART_HEAP_POISONING' : 'true'
}
@@ -122,6 +106,9 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Consider removing this configuration when it is no longer used by
+ # any continuous testing target (b/62611253), as the SS collector overlaps
+ # with the CC collector, since both move objects.
'art-ss-gc' : {
'run-test' : ['--interpreter',
'--optimizing',
@@ -131,6 +118,7 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
'art-gss-gc' : {
'run-test' : ['--interpreter',
'--optimizing',
@@ -140,6 +128,9 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Consider removing this configuration when it is no longer used by
+ # any continuous testing target (b/62611253), as the SS collector overlaps
+ # with the CC collector, since both move objects.
'art-ss-gc-tlab' : {
'run-test' : ['--interpreter',
'--optimizing',
@@ -150,6 +141,7 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
'art-gss-gc-tlab' : {
'run-test' : ['--interpreter',
'--optimizing',
@@ -180,12 +172,6 @@
'run-test' : ['--interpreter',
'--no-image']
},
- 'art-relocate-no-patchoat' : {
- 'run-test' : ['--relocate-npatchoat']
- },
- 'art-no-dex2oat' : {
- # Deprecated configuration.
- },
'art-heap-poisoning' : {
'run-test' : ['--interpreter',
'--optimizing',
@@ -199,10 +185,9 @@
},
'art-preopt' : {
# This test configuration is intended to be representative of the case
- # of preopted apps, which are precompiled compiled pic against an
+ # of preopted apps, which are precompiled against an
# unrelocated image, then used with a relocated image.
- 'run-test' : ['--pictest',
- '--prebuild',
+ 'run-test' : ['--prebuild',
'--relocate',
'--jit']
},
@@ -226,6 +211,9 @@
'ART_HEAP_POISONING' : 'true'
}
},
+ # TODO: Consider removing this configuration when it is no longer used by
+ # any continuous testing target (b/62611253), as the SS collector overlaps
+ # with the CC collector, since both move objects.
'art-gtest-ss-gc': {
'make' : 'test-art-host-gtest',
'env': {
@@ -235,6 +223,7 @@
'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
'art-gtest-gss-gc': {
'make' : 'test-art-host-gtest',
'env' : {
@@ -242,6 +231,9 @@
'ART_USE_READ_BARRIER' : 'false'
}
},
+ # TODO: Consider removing this configuration when it is no longer used by
+ # any continuous testing target (b/62611253), as the SS collector overlaps
+ # with the CC collector, since both move objects.
'art-gtest-ss-gc-tlab': {
'make' : 'test-art-host-gtest',
'env': {
@@ -250,6 +242,7 @@
'ART_USE_READ_BARRIER' : 'false',
}
},
+ # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
'art-gtest-gss-gc-tlab': {
'make' : 'test-art-host-gtest',
'env': {
@@ -273,10 +266,10 @@
}
},
- # ASAN (host) configurations.
+ # ASAN (host) configurations.
- # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
- # as our build tools leak. b/37751350
+ # These configurations need detect_leaks=0 to work in non-setup environments like build bots,
+ # as our build tools leak. b/37751350
'art-gtest-asan': {
'make' : 'test-art-host-gtest',
@@ -306,11 +299,11 @@
}
},
- # ART Golem build targets used by go/lem (continuous ART benchmarking),
- # (art-opt-cc is used by default since it mimics the default preopt config),
- #
- # calls golem/build-target.sh which builds a golem tarball of the target name,
- # e.g. 'golem: android-armv7' produces an 'android-armv7.tar.gz' upon success.
+ # ART Golem build targets used by go/lem (continuous ART benchmarking),
+ # (art-opt-cc is used by default since it mimics the default preopt config),
+ #
+ # calls golem/build-target.sh which builds a golem tarball of the target name,
+ # e.g. 'golem: android-armv7' produces an 'android-armv7.tar.gz' upon success.
'art-golem-android-armv7': {
'golem' : 'android-armv7'
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 10c8619..4e873c1 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -31,7 +31,7 @@
There are various options to invoke the script which are:
-t: Either the test name as in art/test or the test name including the variant
information. Eg, "-t 001-HelloWorld",
- "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32"
+ "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32"
-j: Number of thread workers to be used. Eg - "-j64"
--dry-run: Instead of running the test name, just print its name.
--verbose
@@ -117,6 +117,7 @@
gdb = False
gdb_arg = ''
runtime_option = ''
+with_agent = []
run_test_option = []
stop_testrunner = False
dex2oat_jobs = -1 # -1 corresponds to default threads for dex2oat
@@ -138,22 +139,21 @@
global TOTAL_VARIANTS_SET
global DISABLED_TEST_CONTAINER
# TODO: Avoid duplication of the variant names in different lists.
- VARIANT_TYPE_DICT['pictest'] = {'pictest', 'npictest'}
VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
- VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image', 'multipicimage'}
+ VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
- VARIANT_TYPE_DICT['relocate'] = {'relocate-npatchoat', 'relocate', 'no-relocate'}
+ VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
'field-stress', 'step-stress'}
- VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'optimizing',
- 'regalloc_gc', 'speed-profile'}
+ VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
+ 'optimizing', 'regalloc_gc', 'speed-profile', 'baseline'}
for v_type in VARIANT_TYPE_DICT:
TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
@@ -180,7 +180,6 @@
# These are the default variant-options we will use if nothing in the group is specified.
default_variants = {
'target': {'host', 'target'},
- 'pictest': {'npictest'},
'prebuild': {'prebuild'},
'cdex_level': {'cdex-fast'},
'jvmti': { 'no-jvmti'},
@@ -194,7 +193,6 @@
'gc': {'cms'},
'jni': {'checkjni'},
'image': {'picimage'},
- 'pictest': {'pictest'},
'debuggable': {'ndebuggable'},
'run': {'debug'},
# address_sizes_target depends on the target so it is dealt with below.
@@ -333,6 +331,9 @@
if runtime_option:
for opt in runtime_option:
options_all += ' --runtime-option ' + opt
+ if with_agent:
+ for opt in with_agent:
+ options_all += ' --with-agent ' + opt
if dex2oat_jobs != -1:
options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)
@@ -342,7 +343,7 @@
user_input_variants['prebuild'], user_input_variants['compiler'],
user_input_variants['relocate'], user_input_variants['trace'],
user_input_variants['gc'], user_input_variants['jni'],
- user_input_variants['image'], user_input_variants['pictest'],
+ user_input_variants['image'],
user_input_variants['debuggable'], user_input_variants['jvmti'],
user_input_variants['cdex_level'])
return config
@@ -355,13 +356,13 @@
'prebuild': [''], 'compiler': [''],
'relocate': [''], 'trace': [''],
'gc': [''], 'jni': [''],
- 'image': [''], 'pictest': [''],
+ 'image': [''],
'debuggable': [''], 'jvmti': [''],
'cdex_level': ['']})
def start_combination(config_tuple, address_size):
test, target, run, prebuild, compiler, relocate, trace, gc, \
- jni, image, pictest, debuggable, jvmti, cdex_level = config_tuple
+ jni, image, debuggable, jvmti, cdex_level = config_tuple
if stop_testrunner:
# When ART_TEST_KEEP_GOING is set to false, then as soon as a test
@@ -383,7 +384,6 @@
test_name += gc + '-'
test_name += jni + '-'
test_name += image + '-'
- test_name += pictest + '-'
test_name += debuggable + '-'
test_name += jvmti + '-'
test_name += cdex_level + '-'
@@ -391,7 +391,7 @@
test_name += address_size
variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
- image, pictest, debuggable, jvmti, cdex_level, address_size}
+ image, debuggable, jvmti, cdex_level, address_size}
options_test = options_all
@@ -429,15 +429,17 @@
options_test += ' --interpreter --verify-soft-fail'
elif compiler == 'jit':
options_test += ' --jit'
+ elif compiler == 'jit-on-first-use':
+ options_test += ' --jit --runtime-option -Xjitthreshold:0'
elif compiler == 'speed-profile':
options_test += ' --random-profile'
+ elif compiler == 'baseline':
+ options_test += ' --baseline'
if relocate == 'relocate':
options_test += ' --relocate'
elif relocate == 'no-relocate':
options_test += ' --no-relocate'
- elif relocate == 'relocate-npatchoat':
- options_test += ' --relocate --no-patchoat'
if trace == 'trace':
options_test += ' --trace'
@@ -456,11 +458,6 @@
if image == 'no-image':
options_test += ' --no-image'
- elif image == 'multipicimage':
- options_test += ' --multi-image'
-
- if pictest == 'pictest':
- options_test += ' --pic-test'
if debuggable == 'debuggable':
options_test += ' --debuggable'
@@ -813,7 +810,7 @@
It supports two types of test_name:
1) Like 001-HelloWorld. In this case, it will just verify if the test actually
exists and if it does, it returns the testname.
- 2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-npictest-ndebuggable-001-HelloWorld32
+ 2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32
In this case, it will parse all the variants and check if they are placed
correctly. If yes, it will set the various VARIANT_TYPES to use the
variants required to run the test. Again, it returns the test_name
@@ -837,7 +834,6 @@
regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
- regex += '(' + '|'.join(VARIANT_TYPE_DICT['pictest']) + ')-'
regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
@@ -854,12 +850,11 @@
_user_input_variants['gc'].add(match.group(7))
_user_input_variants['jni'].add(match.group(8))
_user_input_variants['image'].add(match.group(9))
- _user_input_variants['pictest'].add(match.group(10))
- _user_input_variants['debuggable'].add(match.group(11))
- _user_input_variants['jvmti'].add(match.group(12))
- _user_input_variants['cdex_level'].add(match.group(13))
- _user_input_variants['address_sizes'].add(match.group(15))
- return {match.group(14)}
+ _user_input_variants['debuggable'].add(match.group(10))
+ _user_input_variants['jvmti'].add(match.group(11))
+ _user_input_variants['cdex_level'].add(match.group(12))
+ _user_input_variants['address_sizes'].add(match.group(14))
+ return {match.group(13)}
raise ValueError(test_name + " is not a valid test")
@@ -911,6 +906,7 @@
global timeout
global dex2oat_jobs
global run_all_configs
+ global with_agent
parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
@@ -943,6 +939,8 @@
This should be enclosed in single-quotes to allow for spaces. The option
will be split using shlex.split() prior to invoking run-test.
Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
+ global_group.add_argument('--with-agent', action='append', dest='with_agent',
+ help="""Pass an agent to be attached to the runtime""")
global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
help="""Pass an option to the runtime. Runtime options
starting with a '-' must be separated by a '=', for
@@ -991,6 +989,7 @@
if options['gdb_arg']:
gdb_arg = options['gdb_arg']
runtime_option = options['runtime_option'];
+ with_agent = options['with_agent'];
run_test_option = sum(map(shlex.split, options['run_test_option']), [])
timeout = options['timeout']
@@ -1013,10 +1012,8 @@
build_targets += 'test-art-target-run-test-dependencies '
if 'jvm' in _user_input_variants['target']:
build_targets += 'test-art-host-run-test-dependencies '
- build_command = 'make'
+ build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
build_command += ' DX='
- build_command += ' -j'
- build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + build_targets
if subprocess.call(build_command.split()):
# Debugging for b/62653020
diff --git a/test/ti-agent/jni_binder.cc b/test/ti-agent/jni_binder.cc
index 32236de..a115c22 100644
--- a/test/ti-agent/jni_binder.cc
+++ b/test/ti-agent/jni_binder.cc
@@ -174,7 +174,7 @@
class_loader));
}
-jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
+jclass GetClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
if (class_loader != nullptr) {
return FindClassWithClassLoader(env, class_name, class_loader);
}
@@ -223,7 +223,7 @@
}
// TODO: Implement scanning *all* classloaders.
- LOG(FATAL) << "Unimplemented";
+ LOG(WARNING) << "Scanning all classloaders unimplemented";
return nullptr;
}
@@ -251,7 +251,7 @@
void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader) {
// Use JNI to load the class.
- ScopedLocalRef<jclass> klass(env, FindClass(jvmti_env, env, class_name, class_loader));
+ ScopedLocalRef<jclass> klass(env, GetClass(jvmti_env, env, class_name, class_loader));
CHECK(klass.get() != nullptr) << class_name;
BindFunctionsOnClass(jvmti_env, env, klass.get());
}
diff --git a/test/ti-agent/jni_binder.h b/test/ti-agent/jni_binder.h
index e998dc5..3d2ff9c 100644
--- a/test/ti-agent/jni_binder.h
+++ b/test/ti-agent/jni_binder.h
@@ -24,7 +24,7 @@
// Find the given classname. First try the implied classloader, then the system classloader,
// then use JVMTI to find all classloaders.
-jclass FindClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader);
+jclass GetClass(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name, jobject class_loader);
// Load the class through JNI. Inspect it, find all native methods. Construct the corresponding
// mangled name, run dlsym and bind the method.
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index bd320c6..e123e9f 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -92,7 +92,7 @@
struct Allocator : public dex::Writer::Allocator {
explicit Allocator(jvmtiEnv* jvmti_env) : jvmti_env_(jvmti_env) {}
- virtual void* Allocate(size_t size) {
+ void* Allocate(size_t size) override {
unsigned char* out = nullptr;
if (JVMTI_ERROR_NONE != jvmti_env_->Allocate(size, &out)) {
return nullptr;
@@ -100,7 +100,7 @@
return out;
}
}
- virtual void Free(void* ptr) {
+ void Free(void* ptr) override {
jvmti_env_->Deallocate(reinterpret_cast<unsigned char*>(ptr));
}
private:
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 3b5230f..dcd9105 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -104,9 +104,17 @@
$(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
cp $(PRIVATE_AHAT_SOURCE_PROGUARD_MAP) $@
+ifeq (true,$(HOST_PREFER_32_BIT))
+ AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm32
+ AHAT_TEST_DALVIKVM_ARG := --32
+else
+ AHAT_TEST_DALVIKVM_DEP := $(HOST_OUT_EXECUTABLES)/dalvikvm64
+ AHAT_TEST_DALVIKVM_ARG := --64
+endif
+
# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
- $(HOST_OUT_EXECUTABLES)/dalvikvm64 \
+ $(AHAT_TEST_DALVIKVM_DEP) \
$(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
$(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
@@ -114,20 +122,24 @@
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_ANDROID_DATA)
+$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
$(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+ $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+ -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ANDROID_DATA := $(AHAT_TEST_DUMP_BASE_ANDROID_DATA)
+$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DALVIKVM_ARG := $(AHAT_TEST_DALVIKVM_ARG)
$(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+ $(PRIVATE_AHAT_TEST_ART) -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
+ -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
# --- ahat-ri-test-dump.jar -------
include $(CLEAR_VARS)
@@ -150,6 +162,7 @@
java -cp $(PRIVATE_AHAT_RI_TEST_DUMP_JAR) Main $@
# --- ahat-tests.jar --------------
+# To run these tests, use: atest ahat-tests --host
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(call all-java-files-under, src/test)
LOCAL_JAR_MANIFEST := etc/ahat-tests.mf
@@ -170,10 +183,6 @@
include $(BUILD_HOST_JAVA_LIBRARY)
AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE)
-.PHONY: ahat-test
-ahat-test: PRIVATE_AHAT_TEST_JAR := $(AHAT_TEST_JAR)
-ahat-test: $(AHAT_TEST_JAR)
- java -enableassertions -jar $(PRIVATE_AHAT_TEST_JAR)
endif # EMMA_INSTRUMENT
endif # linux
diff --git a/tools/ahat/TEST_MAPPING b/tools/ahat/TEST_MAPPING
new file mode 100644
index 0000000..603e95d
--- /dev/null
+++ b/tools/ahat/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "ahat-tests"
+ }
+ ]
+}
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index 5426f7b..01e00e9 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -73,6 +73,9 @@
method public com.android.ahat.heapdump.AhatInstance getAssociatedBitmapInstance();
method public com.android.ahat.heapdump.AhatClassObj getAssociatedClassForOverhead();
method public com.android.ahat.heapdump.AhatInstance getBaseline();
+ method public java.lang.String getBinderProxyInterfaceName();
+ method public java.lang.String getBinderStubInterfaceName();
+ method public java.lang.String getBinderTokenDescriptor();
method public java.lang.String getClassName();
method public com.android.ahat.heapdump.AhatClassObj getClassObj();
method public java.lang.String getDexCacheLocation(int);
@@ -96,6 +99,7 @@
method public boolean isArrayInstance();
method public boolean isClassInstance();
method public boolean isClassObj();
+ method public boolean isInstanceOfClass(java.lang.String);
method public boolean isPlaceHolder();
method public boolean isRoot();
method public boolean isStronglyReachable();
@@ -226,6 +230,7 @@
method public int getLineNumber();
method public java.lang.String getMethodName();
method public void getObjects(java.lang.String, java.lang.String, java.util.Collection<com.android.ahat.heapdump.AhatInstance>);
+ method public void getObjects(java.util.function.Predicate<com.android.ahat.heapdump.AhatInstance>, java.util.function.Consumer<com.android.ahat.heapdump.AhatInstance>);
method public java.util.List<com.android.ahat.heapdump.Site.ObjectsInfo> getObjectsInfos();
method public com.android.ahat.heapdump.Site getParent();
method public java.lang.String getSignature();
diff --git a/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java
index 1a8f018..81611b6 100644
--- a/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java
+++ b/tools/ahat/src/main/com/android/ahat/ObjectsHandler.java
@@ -16,6 +16,7 @@
package com.android.ahat;
+import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.Site;
@@ -24,6 +25,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.function.Predicate;
class ObjectsHandler implements AhatHandler {
private static final String OBJECTS_ID = "objects";
@@ -34,32 +36,102 @@
mSnapshot = snapshot;
}
+ /**
+ * Get the list of instances that match the given site, class, and heap
+ * filters. This method is public to facilitate testing.
+ *
+ * @param site the site to get instances from
+ * @param className non-null name of the class to restrict instances to.
+ * @param subclass if true, include instances of subclasses of the named class.
+ * @param heapName name of the heap to restrict instances to. May be null to
+ * allow instances on any heap.
+ * @return list of matching instances
+ */
+ public static List<AhatInstance> getObjects(
+ Site site, String className, boolean subclass, String heapName) {
+ Predicate<AhatInstance> predicate = (x) -> {
+ return (heapName == null || x.getHeap().getName().equals(heapName))
+ && (subclass ? x.isInstanceOfClass(className) : className.equals(x.getClassName()));
+ };
+
+ List<AhatInstance> insts = new ArrayList<AhatInstance>();
+ site.getObjects(predicate, x -> insts.add(x));
+ return insts;
+ }
+
@Override
public void handle(Doc doc, Query query) throws IOException {
int id = query.getInt("id", 0);
- String className = query.get("class", null);
+ String className = query.get("class", "java.lang.Object");
String heapName = query.get("heap", null);
+ boolean subclass = (query.getInt("subclass", 0) != 0);
Site site = mSnapshot.getSite(id);
- List<AhatInstance> insts = new ArrayList<AhatInstance>();
- site.getObjects(heapName, className, insts);
+ List<AhatInstance> insts = getObjects(site, className, subclass, heapName);
Collections.sort(insts, Sort.defaultInstanceCompare(mSnapshot));
- doc.title("Objects");
+ doc.title("Instances");
- SizeTable.table(doc, mSnapshot.isDiffed(),
- new Column("Heap"),
- new Column("Object"));
+ // Write a description of the current settings, with links to adjust the
+ // settings, such as:
+ // Site: ROOT -
+ // Class: android.os.Binder
+ // Subclasses: excluded (switch to included)
+ // Heap: any (switch to app, image, zygote)
+ // Count: 17,424
+ doc.descriptions();
+ doc.description(DocString.text("Site"), Summarizer.summarize(site));
+ doc.description(DocString.text("Class"), DocString.text(className));
- SubsetSelector<AhatInstance> selector = new SubsetSelector(query, OBJECTS_ID, insts);
- for (AhatInstance inst : selector.selected()) {
- AhatInstance base = inst.getBaseline();
- SizeTable.row(doc, inst.getSize(), base.getSize(),
- DocString.text(inst.getHeap().getName()),
- Summarizer.summarize(inst));
+ DocString subclassChoice = DocString.text(subclass ? "included" : "excluded");
+ subclassChoice.append(" (switch to ");
+ subclassChoice.appendLink(query.with("subclass", subclass ? 0 : 1),
+ DocString.text(subclass ? "excluded" : "included"));
+ subclassChoice.append(")");
+ doc.description(DocString.text("Subclasses"), subclassChoice);
+
+ DocString heapChoice = DocString.text(heapName == null ? "any" : heapName);
+ heapChoice.append(" (switch to ");
+ String comma = "";
+ for (AhatHeap heap : mSnapshot.getHeaps()) {
+ if (!heap.getName().equals(heapName)) {
+ heapChoice.append(comma);
+ heapChoice.appendLink(
+ query.with("heap", heap.getName()),
+ DocString.text(heap.getName()));
+ comma = ", ";
+ }
}
- SizeTable.end(doc);
- selector.render(doc);
+ if (heapName != null) {
+ heapChoice.append(comma);
+ heapChoice.appendLink(
+ query.with("heap", null),
+ DocString.text("any"));
+ }
+ heapChoice.append(")");
+ doc.description(DocString.text("Heap"), heapChoice);
+
+ doc.description(DocString.text("Count"), DocString.format("%,14d", insts.size()));
+ doc.end();
+ doc.println(DocString.text(""));
+
+ if (insts.isEmpty()) {
+ doc.println(DocString.text("(none)"));
+ } else {
+ SizeTable.table(doc, mSnapshot.isDiffed(),
+ new Column("Heap"),
+ new Column("Object"));
+
+ SubsetSelector<AhatInstance> selector = new SubsetSelector(query, OBJECTS_ID, insts);
+ for (AhatInstance inst : selector.selected()) {
+ AhatInstance base = inst.getBaseline();
+ SizeTable.row(doc, inst.getSize(), base.getSize(),
+ DocString.text(inst.getHeap().getName()),
+ Summarizer.summarize(inst));
+ }
+ SizeTable.end(doc);
+ selector.render(doc);
+ }
}
}
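The filtering performed by ObjectsHandler.getObjects above is built on the new predicate-based Site.getObjects overload, which is public API (see the ahat_api.txt change). A minimal sketch of driving it directly, assuming a Site obtained from an already-loaded snapshot (the helper class name is illustrative):

    import com.android.ahat.heapdump.AhatInstance;
    import com.android.ahat.heapdump.Site;

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Predicate;

    class BinderProxyLister {
      // Collect all instances of (subclasses of) android.os.BinderProxy reachable from
      // the given site, mirroring the class filter used by ObjectsHandler.getObjects.
      static List<AhatInstance> listBinderProxies(Site site) {
        Predicate<AhatInstance> isBinderProxy =
            inst -> inst.isInstanceOfClass("android.os.BinderProxy");
        List<AhatInstance> result = new ArrayList<>();
        site.getObjects(isBinderProxy, result::add);
        return result;
      }
    }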
diff --git a/tools/ahat/src/main/com/android/ahat/Query.java b/tools/ahat/src/main/com/android/ahat/Query.java
index 9c2783c..5f064cd 100644
--- a/tools/ahat/src/main/com/android/ahat/Query.java
+++ b/tools/ahat/src/main/com/android/ahat/Query.java
@@ -79,7 +79,9 @@
/**
* Return a uri suitable for an href target that links to the current
* page, except with the named query parameter set to the new value.
- *
+ * <p>
+ * <code>value</code> may be null to remove the named query parameter.
+ * <p>
* The generated parameters will be sorted alphabetically so it is easier to
* test.
*/
@@ -92,11 +94,13 @@
params.put(name, value);
String and = "";
for (Map.Entry<String, String> entry : params.entrySet()) {
- newQuery.append(and);
- newQuery.append(entry.getKey());
- newQuery.append('=');
- newQuery.append(entry.getValue());
- and = "&";
+ if (entry.getValue() != null) {
+ newQuery.append(and);
+ newQuery.append(entry.getKey());
+ newQuery.append('=');
+ newQuery.append(entry.getValue());
+ and = "&";
+ }
}
return DocString.uri(newQuery.toString());
}
diff --git a/tools/ahat/src/main/com/android/ahat/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java
index ab88c04..df3b577 100644
--- a/tools/ahat/src/main/com/android/ahat/Summarizer.java
+++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java
@@ -112,6 +112,24 @@
formatted.append(" overhead for ");
formatted.append(summarize(cls));
}
+
+ // Annotate BinderProxy with its interface name.
+ String binderProxyInterface = inst.getBinderProxyInterfaceName();
+ if (binderProxyInterface != null) {
+ formatted.appendFormat(" for %s", binderProxyInterface);
+ }
+
+ // Annotate Binder tokens with their descriptor
+ String binderTokenDescriptor = inst.getBinderTokenDescriptor();
+ if (binderTokenDescriptor != null) {
+ formatted.appendFormat(" binder token (%s)", binderTokenDescriptor);
+ }
+ // Annotate Binder services with their interface name.
+ String binderStubInterface = inst.getBinderStubInterfaceName();
+ if (binderStubInterface != null) {
+ formatted.appendFormat(" binder service (%s)", binderStubInterface);
+ }
+
return formatted;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
index 4c60d8b..d2ba68d 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
@@ -107,21 +107,6 @@
return new ReferenceIterator();
}
- /**
- * Returns true if this is an instance of a (subclass of a) class with the
- * given name.
- */
- private boolean isInstanceOfClass(String className) {
- AhatClassObj cls = getClassObj();
- while (cls != null) {
- if (className.equals(cls.getName())) {
- return true;
- }
- cls = cls.getSuperClassObj();
- }
- return false;
- }
-
@Override public String asString(int maxChars) {
if (!isInstanceOfClass("java.lang.String")) {
return null;
@@ -160,6 +145,52 @@
return null;
}
+ @Override public String getBinderProxyInterfaceName() {
+ if (isInstanceOfClass("android.os.BinderProxy")) {
+ for (AhatInstance inst : getReverseReferences()) {
+ String className = inst.getClassName();
+ if (className.endsWith("$Stub$Proxy")) {
+ Value value = inst.getField("mRemote");
+ if (value != null && value.asAhatInstance() == this) {
+ return className.substring(0, className.lastIndexOf("$Stub$Proxy"));
+ }
+ }
+ }
+ }
+ return null;
+ }
+
+ @Override public String getBinderTokenDescriptor() {
+ String descriptor = getBinderDescriptor();
+ if (descriptor == null) {
+ return null;
+ }
+
+ if (isInstanceOfClass(descriptor + "$Stub")) {
+ // This is an instance of an auto-generated interface class, and
+ // therefore not a binder token.
+ return null;
+ }
+
+ return descriptor;
+ }
+
+ @Override public String getBinderStubInterfaceName() {
+ String descriptor = getBinderDescriptor();
+ if (descriptor == null || descriptor.isEmpty()) {
+ // Binder interface stubs always have a non-empty descriptor
+ return null;
+ }
+
+ // We only consider something a binder service if it's an instance of the
+ // auto-generated descriptor$Stub class.
+ if (isInstanceOfClass(descriptor + "$Stub")) {
+ return descriptor;
+ }
+
+ return null;
+ }
+
@Override public AhatInstance getAssociatedBitmapInstance() {
return getBitmapInfo() == null ? null : this;
}
@@ -177,6 +208,25 @@
}
/**
+ * Returns the descriptor of an android.os.Binder object.
+ * If no descriptor is set, returns an empty string.
+ * If the object is not an android.os.Binder object, returns null.
+ */
+ private String getBinderDescriptor() {
+ if (isInstanceOfClass("android.os.Binder")) {
+      Value value = getField("mDescriptor");
+
+ if (value == null) {
+ return "";
+ } else {
+ return value.asAhatInstance().asString();
+ }
+ } else {
+ return null;
+ }
+ }
+
+ /**
* Read the given field from the given instance.
* The field is assumed to be a byte[] field.
* Returns null if the field value is null, not a byte[] or could not be read.
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index 3d691c7..281c977 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -330,6 +330,25 @@
}
/**
+ * Returns true if this is an instance of a (subclass of a) class with the
+ * given name.
+ *
+ * @param className the name of the class to check for
+ * @return true if this is an instance of a (subclass of a) class with the
+ * given name
+ */
+ public boolean isInstanceOfClass(String className) {
+ AhatClassObj cls = getClassObj();
+ while (cls != null) {
+ if (className.equals(cls.getName())) {
+ return true;
+ }
+ cls = cls.getSuperClassObj();
+ }
+ return false;
+ }
+
+ /**
* Returns true if the given instance is an array instance.
*
* @return true if the given instance is an array instance
@@ -471,6 +490,44 @@
}
/**
+ * Returns the name of the Binder proxy interface associated with this object.
+ * Only applies to instances of android.os.BinderProxy. If this is an
+ * instance of BinderProxy, returns the fully qualified binder interface name,
+ * otherwise returns null.
+ *
+ * @return the name of the binder interface associated with this object
+ */
+ public String getBinderProxyInterfaceName() {
+ return null;
+ }
+
+ /**
+ * Returns the descriptor of the Binder token associated with this object.
+ * Only applies to instances of android.os.Binder. If this is an instance of
+ * android.os.Binder that is also an instance of the auto-generated
+ * "descriptor$Stub" class, the object in question is a binder stub rather
+ * than a token, and this function returns null. In that case, see
+ * {@link AhatInstance#getBinderStubInterfaceName}.
+ *
+ * @return the descriptor of this object, if it's a binder token
+ */
+ public String getBinderTokenDescriptor() {
+ return null;
+ }
+
+ /**
+ * Returns the name of the Binder stub interface associated with this object.
+ * Only applies to instances which are a subclass of android.os.Binder,
+ * and are an instance of class 'descriptor$Stub', where descriptor
+ * is the descriptor of the android.os.Binder object.
+ *
+ * @return the name of the binder interface associated with this object,
+ * or null if this is not a binder stub interface.
+ */
+ public String getBinderStubInterfaceName() {
+ return null;
+ }
+
+ /**
* Returns the android.graphics.Bitmap instance associated with this object.
* Instances of android.graphics.Bitmap return themselves. If this is a
* byte[] array containing pixel data for an instance of
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
index 46a1729..4f0660f 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
@@ -23,6 +23,8 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
/**
* Used to collect information about objects allocated at a particular
@@ -259,22 +261,37 @@
* every heap should be collected.
* @param className the name of the class the collected objects should
* belong to. This may be null to indicate objects of
- * every class should be collected.
+ * every class should be collected. Instances of subclasses
+ * of this class are not included.
* @param objects out parameter. A collection of objects that all
* collected objects should be added to.
*/
public void getObjects(String heapName, String className, Collection<AhatInstance> objects) {
+ Predicate<AhatInstance> predicate = x -> {
+ return (heapName == null || x.getHeap().getName().equals(heapName))
+ && (className == null || x.getClassName().equals(className));
+ };
+ getObjects(predicate, x -> objects.add(x));
+ }
+
+ /**
+ * Collects the objects allocated under this site, filtered by the given
+ * predicate.
+ * Includes objects allocated in children sites.
+ * @param predicate limit instances to those satisfying this predicate
+ * @param consumer consumer of the objects
+ */
+ public void getObjects(Predicate<AhatInstance> predicate, Consumer<AhatInstance> consumer) {
for (AhatInstance inst : mObjects) {
- if ((heapName == null || inst.getHeap().getName().equals(heapName))
- && (className == null || inst.getClassName().equals(className))) {
- objects.add(inst);
+ if (predicate.test(inst)) {
+ consumer.accept(inst);
}
}
// Recursively visit children. Recursion should be okay here because the
// stack depth is limited by a reasonable amount (128 frames or so).
for (Site child : mChildren) {
- child.getObjects(heapName, className, objects);
+ child.getObjects(predicate, consumer);
}
}
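A minimal usage sketch of the new predicate/consumer overload (not part of this change; `snapshot` stands for an already-parsed AhatSnapshot and the filter values are illustrative):

    // Print every BinderProxy in the app heap, annotated with its interface name.
    snapshot.getRootSite().getObjects(
        inst -> "app".equals(inst.getHeap().getName())
             && "android.os.BinderProxy".equals(inst.getClassName()),
        inst -> System.out.println(inst + " for " + inst.getBinderProxyInterfaceName()));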
diff --git a/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
index 5c21a9e..88231dd 100644
--- a/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
+++ b/tools/ahat/src/main/com/android/ahat/proguard/ProguardMap.java
@@ -25,6 +25,7 @@
import java.text.ParseException;
import java.util.HashMap;
import java.util.Map;
+import java.util.TreeMap;
/**
* A representation of a proguard mapping for deobfuscating class names,
@@ -35,13 +36,36 @@
private static final String ARRAY_SYMBOL = "[]";
private static class FrameData {
- public FrameData(String clearMethodName, int lineDelta) {
+ public FrameData(String clearMethodName) {
this.clearMethodName = clearMethodName;
- this.lineDelta = lineDelta;
}
- public final String clearMethodName;
- public final int lineDelta; // lineDelta = obfuscatedLine - clearLine
+ private final String clearMethodName;
+ private final TreeMap<Integer, LineNumber> lineNumbers = new TreeMap<>();
+
+ public int getClearLine(int obfuscatedLine) {
+ Map.Entry<Integer, LineNumber> lineNumberEntry = lineNumbers.floorEntry(obfuscatedLine);
+ LineNumber lineNumber = lineNumberEntry == null ? null : lineNumberEntry.getValue();
+ if (lineNumber != null
+ && obfuscatedLine >= lineNumber.obfuscatedLineStart
+ && obfuscatedLine <= lineNumber.obfuscatedLineEnd) {
+ return lineNumber.clearLineStart + obfuscatedLine - lineNumber.obfuscatedLineStart;
+ } else {
+ return obfuscatedLine;
+ }
+ }
+ }
+
+ private static class LineNumber {
+ public LineNumber(int obfuscatedLineStart, int obfuscatedLineEnd, int clearLineStart) {
+ this.obfuscatedLineStart = obfuscatedLineStart;
+ this.obfuscatedLineEnd = obfuscatedLineEnd;
+ this.clearLineStart = clearLineStart;
+ }
+
+ private final int obfuscatedLineStart;
+ private final int obfuscatedLineEnd;
+ private final int clearLineStart;
}
private static class ClassData {
@@ -77,13 +101,16 @@
return clearField == null ? obfuscatedName : clearField;
}
- // TODO: Does this properly interpret the meaning of line numbers? Is
- // it possible to have multiple frame entries for the same method
- // name and signature that differ only by line ranges?
public void addFrame(String obfuscatedMethodName, String clearMethodName,
- String clearSignature, int obfuscatedLine, int clearLine) {
- String key = obfuscatedMethodName + clearSignature;
- mFrames.put(key, new FrameData(clearMethodName, obfuscatedLine - clearLine));
+ String clearSignature, int obfuscatedLine, int obfuscatedLineEnd, int clearLine) {
+ String key = obfuscatedMethodName + clearSignature;
+ FrameData data = mFrames.get(key);
+ if (data == null) {
+ data = new FrameData(clearMethodName);
+ }
+ data.lineNumbers.put(
+ obfuscatedLine, new LineNumber(obfuscatedLine, obfuscatedLineEnd, clearLine));
+ mFrames.put(key, data);
}
public Frame getFrame(String clearClassName, String obfuscatedMethodName,
@@ -91,10 +118,10 @@
String key = obfuscatedMethodName + clearSignature;
FrameData frame = mFrames.get(key);
if (frame == null) {
- frame = new FrameData(obfuscatedMethodName, 0);
+ frame = new FrameData(obfuscatedMethodName);
}
return new Frame(frame.clearMethodName, clearSignature,
- getFileName(clearClassName), obfuscatedLine - frame.lineDelta);
+ getFileName(clearClassName), frame.getClearLine(obfuscatedLine));
}
}
@@ -225,13 +252,18 @@
} else {
// For methods, the type is of the form: [#:[#:]]<returnType>
int obfuscatedLine = 0;
+ // The end of the obfuscated line range.
+ // If the line does not contain an explicit end range, e.g. "#:", it is treated as "#:#:".
+ int obfuscatedLineEnd = 0;
int colon = type.indexOf(':');
if (colon != -1) {
obfuscatedLine = Integer.parseInt(type.substring(0, colon));
+ obfuscatedLineEnd = obfuscatedLine;
type = type.substring(colon + 1);
}
colon = type.indexOf(':');
if (colon != -1) {
+ obfuscatedLineEnd = Integer.parseInt(type.substring(0, colon));
type = type.substring(colon + 1);
}
@@ -261,7 +293,7 @@
String clearSig = fromProguardSignature(sig + type);
classData.addFrame(obfuscatedName, clearName, clearSig,
- obfuscatedLine, clearLine);
+ obfuscatedLine, obfuscatedLineEnd, clearLine);
}
line = reader.readLine();
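Worked example of the range-aware mapping added above (mirroring the updated ProguardMapTest further below): for the map entry "100:105:void lineObfuscatedMethod():50 -> o", obfuscated line 103 falls inside [100, 105], so the clear line becomes 50 + (103 - 100) = 53. A sketch, assuming the map text has been loaded through ProguardMap.readFromReader as the tests do:

    ProguardMap map = new ProguardMap();
    map.readFromReader(new StringReader(mapText));  // mapText contains the entry above
    ProguardMap.Frame frame =
        map.getFrame("class.with.Methods", "o", "()V", "SourceFile.java", 103);
    // frame.method == "lineObfuscatedMethod", frame.line == 53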
diff --git a/tools/ahat/src/test-dump/DumpedStuff.java b/tools/ahat/src/test-dump/DumpedStuff.java
index 804a3a3..de2968f 100644
--- a/tools/ahat/src/test-dump/DumpedStuff.java
+++ b/tools/ahat/src/test-dump/DumpedStuff.java
@@ -124,6 +124,47 @@
}
}
+ public interface IDumpedManager {
+ public static class Stub extends android.os.Binder implements IDumpedManager {
+ private static final java.lang.String DESCRIPTOR = "DumpedStuff$IDumpedManager";
+ public Stub() {
+ super(DESCRIPTOR);
+ }
+ public static class Proxy implements IDumpedManager {
+ android.os.IBinder mRemote;
+ Proxy(android.os.IBinder binderProxy) {
+ mRemote = binderProxy;
+ }
+ }
+ }
+ }
+
+ public interface IBinderInterfaceImpostor {
+ public static class Stub {
+ public static class Proxy implements IBinderInterfaceImpostor {
+ android.os.IBinder mFakeRemote = new android.os.BinderProxy();
+ Proxy(android.os.IBinder binderProxy) {
+ mFakeRemote = binderProxy;
+ }
+ }
+ }
+ }
+
+ private static class BinderProxyCarrier {
+ android.os.IBinder mRemote;
+ BinderProxyCarrier(android.os.IBinder binderProxy) {
+ mRemote = binderProxy;
+ }
+ }
+
+ private static class BinderService extends IDumpedManager.Stub {
+ // Intentionally empty
+ };
+
+ private static class FakeBinderService extends IBinderInterfaceImpostor.Stub {
+ // Intentionally empty
+ };
+
public String basicString = "hello, world";
public String nonAscii = "Sigma (Ʃ) is not ASCII";
public String embeddedZero = "embedded\0..."; // Non-ASCII for string compression purposes.
@@ -158,6 +199,17 @@
public int[] modifiedArray;
public Object objectAllocatedAtKnownSite;
public Object objectAllocatedAtKnownSubSite;
+ public android.os.IBinder correctBinderProxy = new android.os.BinderProxy();
+ public android.os.IBinder imposedBinderProxy = new android.os.BinderProxy();
+ public android.os.IBinder carriedBinderProxy = new android.os.BinderProxy();
+ Object correctBinderProxyObject = new IDumpedManager.Stub.Proxy(correctBinderProxy);
+ Object impostorBinderProxyObject = new IBinderInterfaceImpostor.Stub.Proxy(imposedBinderProxy);
+ Object carrierBinderProxyObject = new BinderProxyCarrier(carriedBinderProxy);
+
+ Object binderService = new BinderService();
+ Object fakeBinderService = new FakeBinderService();
+ Object binderToken = new android.os.Binder();
+ Object namedBinderToken = new android.os.Binder("awesomeToken");
// Allocate those objects that we need to not be GC'd before taking the heap
// dump.
diff --git a/tools/ahat/src/test-dump/android/os/Binder.java b/tools/ahat/src/test-dump/android/os/Binder.java
new file mode 100644
index 0000000..e89bb74
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/Binder.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+import java.lang.String;
+
+/** Fake android.os.Binder class that just holds a descriptor.
+ *
+ * Note that having this class will cause Proguard to issue warnings when
+ * building ahat-test-dump with 'mm' or 'mma':
+ *
+ * Warning: Library class android.net.wifi.rtt.IWifiRttManager$Stub extends
+ * program class android.os.Binder
+ *
+ * This is because when building for the device, proguard will include the
+ * framework jars, which contain Stub classes that extend android.os.Binder,
+ * which is defined again here.
+ *
+ * Since we don't actually run this code on the device, these warnings can
+ * be ignored.
+ */
+public class Binder implements IBinder {
+ public Binder() {
+ mDescriptor = null;
+ }
+
+ public Binder(String descriptor) {
+ mDescriptor = descriptor;
+ }
+
+ private String mDescriptor;
+}
diff --git a/tools/ahat/src/test-dump/android/os/BinderProxy.java b/tools/ahat/src/test-dump/android/os/BinderProxy.java
new file mode 100644
index 0000000..5f35c61
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/BinderProxy.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/** Fake android.os.BinderProxy class that does absolutely nothing. */
+public class BinderProxy implements IBinder {}
diff --git a/tools/ahat/src/test-dump/android/os/IBinder.java b/tools/ahat/src/test-dump/android/os/IBinder.java
new file mode 100644
index 0000000..6f01468
--- /dev/null
+++ b/tools/ahat/src/test-dump/android/os/IBinder.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.os;
+
+/** Fake android.os.IBinder that means nothing. */
+public interface IBinder {}
diff --git a/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java
index 3aa52b5..abc3cc7 100644
--- a/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java
+++ b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java
@@ -29,6 +29,7 @@
InstanceTest.class,
NativeAllocationTest.class,
ObjectHandlerTest.class,
+ ObjectsHandlerTest.class,
OverviewHandlerTest.class,
PerformanceTest.class,
ProguardMapTest.class,
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index 196eb1e..af0a73b 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -549,4 +549,60 @@
// Other kinds of objects should not have associated classes for overhead.
assertNull(cls.getAssociatedClassForOverhead());
}
+
+ @Test
+ public void binderProxy() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ AhatInstance correctObj = dump.getDumpedAhatInstance("correctBinderProxy");
+ assertEquals("DumpedStuff$IDumpedManager", correctObj.getBinderProxyInterfaceName());
+
+ AhatInstance imposedObj = dump.getDumpedAhatInstance("imposedBinderProxy");
+ assertNull(imposedObj.getBinderProxyInterfaceName());
+
+ AhatInstance carriedObj = dump.getDumpedAhatInstance("carriedBinderProxy");
+ assertNull(carriedObj.getBinderProxyInterfaceName());
+ }
+
+ @Test
+ public void binderToken() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ // Tokens without a descriptor return an empty string
+ AhatInstance binderToken = dump.getDumpedAhatInstance("binderToken");
+ assertEquals("", binderToken.getBinderTokenDescriptor());
+
+ // Named binder tokens return their descriptor
+ AhatInstance namedBinderToken = dump.getDumpedAhatInstance("namedBinderToken");
+ assertEquals("awesomeToken", namedBinderToken.getBinderTokenDescriptor());
+
+ // Binder stubs aren't considered binder tokens
+ AhatInstance binderService = dump.getDumpedAhatInstance("binderService");
+ assertEquals(null, binderService.getBinderTokenDescriptor());
+ }
+
+ @Test
+ public void binderStub() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+
+ // Regular binder service returns the interface name and no token descriptor
+ AhatInstance binderService = dump.getDumpedAhatInstance("binderService");
+ assertEquals("DumpedStuff$IDumpedManager", binderService.getBinderStubInterfaceName());
+
+ // Binder tokens aren't considered binder services
+ AhatInstance binderToken = dump.getDumpedAhatInstance("binderToken");
+ assertEquals(null, binderToken.getBinderStubInterfaceName());
+
+ // Named binder tokens aren't considered binder services
+ AhatInstance namedBinderToken = dump.getDumpedAhatInstance("namedBinderToken");
+ assertEquals(null, namedBinderToken.getBinderStubInterfaceName());
+
+ // Fake service returns null
+ AhatInstance fakeService = dump.getDumpedAhatInstance("fakeBinderService");
+ assertNull(fakeService.getBinderStubInterfaceName());
+
+ // Random non-binder object returns null
+ AhatInstance nonBinderObject = dump.getDumpedAhatInstance("anObject");
+ assertNull(nonBinderObject.getBinderStubInterfaceName());
+ }
}
diff --git a/tools/ahat/src/test/com/android/ahat/ObjectsHandlerTest.java b/tools/ahat/src/test/com/android/ahat/ObjectsHandlerTest.java
new file mode 100644
index 0000000..927e017
--- /dev/null
+++ b/tools/ahat/src/test/com/android/ahat/ObjectsHandlerTest.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.AhatSnapshot;
+import com.android.ahat.heapdump.Site;
+import java.io.IOException;
+import java.util.List;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ObjectsHandlerTest {
+ @Test
+ public void getObjects() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ AhatSnapshot snapshot = dump.getAhatSnapshot();
+
+ Site root = snapshot.getRootSite();
+
+ // We expect a single instance of DumpedStuff
+ List<AhatInstance> dumped = ObjectsHandler.getObjects(
+ root, "DumpedStuff", /* subclass */ false, /* heapName */ null);
+ assertEquals(1, dumped.size());
+ assertTrue(dumped.get(0).getClassName().equals("DumpedStuff"));
+
+ // We expect no direct instances of SuperDumpedStuff
+ List<AhatInstance> direct = ObjectsHandler.getObjects(
+ root, "SuperDumpedStuff", /* subclass */ false, /* heapName */ null);
+ assertTrue(direct.isEmpty());
+
+ // We expect one subclass instance of SuperDumpedStuff
+ List<AhatInstance> subclass = ObjectsHandler.getObjects(
+ root, "SuperDumpedStuff", /* subclass */ true, /* heapName */ null);
+ assertEquals(1, subclass.size());
+ assertTrue(subclass.get(0).getClassName().equals("DumpedStuff"));
+ assertEquals(dumped.get(0), subclass.get(0));
+ }
+}
diff --git a/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java b/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java
index a9952ee..2343b45 100644
--- a/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java
+++ b/tools/ahat/src/test/com/android/ahat/ProguardMapTest.java
@@ -48,7 +48,8 @@
+ " 59:61:void methodWithObfObjArg(class.with.only.Fields) -> m\n"
+ " 64:66:class.with.only.Fields methodWithObfRes() -> n\n"
+ " 80:80:void lineObfuscatedMethod():8:8 -> o\n"
- + " 90:90:void lineObfuscatedMethod2():9 -> p\n"
+ + " 100:105:void lineObfuscatedMethod():50 -> o\n"
+ + " 90:94:void lineObfuscatedMethod2():9 -> p\n"
;
@Test
@@ -157,6 +158,12 @@
assertEquals("Methods.java", frame.filename);
assertEquals(8, frame.line);
+ frame = map.getFrame("class.with.Methods", "o", "()V", "SourceFile.java", 103);
+ assertEquals("lineObfuscatedMethod", frame.method);
+ assertEquals("()V", frame.signature);
+ assertEquals("Methods.java", frame.filename);
+ assertEquals(53, frame.line);
+
frame = map.getFrame("class.with.Methods", "p", "()V", "SourceFile.java", 94);
assertEquals("lineObfuscatedMethod2", frame.method);
assertEquals("()V", frame.signature);
diff --git a/tools/ahat/src/test/com/android/ahat/QueryTest.java b/tools/ahat/src/test/com/android/ahat/QueryTest.java
index 5bcf8ea..52cf963 100644
--- a/tools/ahat/src/test/com/android/ahat/QueryTest.java
+++ b/tools/ahat/src/test/com/android/ahat/QueryTest.java
@@ -41,6 +41,7 @@
assertEquals("/object?answer=43&foo=bar", query.with("answer", "43").toString());
assertEquals("/object?answer=43&foo=bar", query.with("answer", 43).toString());
assertEquals("/object?answer=42&bar=finally&foo=bar", query.with("bar", "finally").toString());
+ assertEquals("/object?answer=42", query.with("foo", null).toString());
}
@Test
@@ -55,6 +56,7 @@
assertEquals("/object?answer=43&foo=sludge", query.with("answer", "43").toString());
assertEquals("/object?answer=42&bar=finally&foo=sludge",
query.with("bar", "finally").toString());
+ assertEquals("/object?answer=42", query.with("foo", null).toString());
}
@Test
@@ -66,5 +68,6 @@
assertEquals(2, query.getInt("foo", 2));
assertEquals("/object?foo=sludge", query.with("foo", "sludge").toString());
assertEquals("/object?answer=43", query.with("answer", "43").toString());
+ assertEquals("/object?", query.with("foo", null).toString());
}
}
diff --git a/tools/amm/Android.bp b/tools/amm/Android.bp
new file mode 100644
index 0000000..e6f6ff7
--- /dev/null
+++ b/tools/amm/Android.bp
@@ -0,0 +1,25 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// --- ammtestjni.so -------------
+
+cc_library_shared {
+ name: "libammtestjni",
+
+ srcs: [
+ "AmmTest/jni/ammtest.c",
+ ],
+
+ sdk_version: "current",
+}
diff --git a/tools/amm/Android.mk b/tools/amm/Android.mk
index 47030c5..fa4ca44 100644
--- a/tools/amm/Android.mk
+++ b/tools/amm/Android.mk
@@ -14,13 +14,6 @@
LOCAL_PATH := $(call my-dir)
-# --- ammtestjni.so -------------
-include $(CLEAR_VARS)
-LOCAL_MODULE := libammtestjni
-LOCAL_SRC_FILES := $(call all-c-files-under, AmmTest/jni)
-LOCAL_SDK_VERSION := current
-include $(BUILD_SHARED_LIBRARY)
-
# --- AmmTest.apk --------------
include $(CLEAR_VARS)
LOCAL_PACKAGE_NAME := AmmTest
@@ -31,4 +24,3 @@
LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/AmmTest/aahat.png
LOCAL_MANIFEST_FILE := AmmTest/AndroidManifest.xml
include $(BUILD_PACKAGE)
-
diff --git a/tools/art b/tools/art
index 9c032c0..fbc7992 100644
--- a/tools/art
+++ b/tools/art
@@ -49,6 +49,7 @@
--profile Run with profiling, then run using profile data.
--verbose Run script verbosely.
--no-clean Don't cleanup oat directories.
+ --no-compile Don't invoke dex2oat before running.
--allow-default-jdwp Don't automatically put in -XjdwpProvider:none.
You probably do not want this.
@@ -290,6 +291,7 @@
ALLOW_DEFAULT_JDWP="no"
VERBOSE="no"
CLEAN_OAT_FILES="yes"
+RUN_DEX2OAT="yes"
EXTRA_OPTIONS=()
DEX2OAT_FLAGS=()
DEX2OAT_CLASSPATH=()
@@ -347,6 +349,10 @@
--no-clean)
CLEAN_OAT_FILES="no"
;;
+ --no-compile)
+ CLEAN_OAT_FILES="no"
+ RUN_DEX2OAT="no"
+ ;;
--allow-default-jdwp)
ALLOW_DEFAULT_JDWP="yes"
;;
@@ -480,8 +486,10 @@
fi
if [ -x "$DEX2OAT_BINARY_PATH" ]; then
- # Run dex2oat before launching ART to generate the oat files for the classpath.
- run_dex2oat
+ if [ "$RUN_DEX2OAT" = "yes" ]; then
+ # Run dex2oat before launching ART to generate the oat files for the classpath.
+ run_dex2oat
+ fi
fi
# Do not continue if the dex2oat failed.
diff --git a/tools/art_verifier/Android.bp b/tools/art_verifier/Android.bp
index afd52fb..6fff27a 100644
--- a/tools/art_verifier/Android.bp
+++ b/tools/art_verifier/Android.bp
@@ -16,7 +16,10 @@
art_cc_defaults {
name: "art_verifier-defaults",
- defaults: ["art_defaults"],
+ defaults: [
+ "art_defaults",
+ "libart_static_defaults",
+ ],
host_supported: true,
srcs: [
"art_verifier.cc",
@@ -24,11 +27,8 @@
header_libs: [
"art_cmdlineparser_headers",
],
- static_libs: art_static_dependencies + [
- "libart",
- "libartbase",
- "libdexfile",
- "libprofile",
+ static_libs: [
+ "libsigchain_dummy",
],
target: {
android: {
diff --git a/tools/art_verifier/art_verifier.cc b/tools/art_verifier/art_verifier.cc
index bb43e67..0ef6c06 100644
--- a/tools/art_verifier/art_verifier.cc
+++ b/tools/art_verifier/art_verifier.cc
@@ -46,8 +46,8 @@
std::string error_msg;
if (!dex_file_loader.Open(dex_filename.c_str(),
dex_filename.c_str(),
- /* verify */ true,
- /* verify_checksum */ true,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_msg,
dex_files)) {
LOG(ERROR) << error_msg;
@@ -111,6 +111,9 @@
} else if (option.starts_with("--repetitions=")) {
char* end;
repetitions_ = strtoul(option.substr(strlen("--repetitions=")).data(), &end, 10);
+ } else if (option.starts_with("--api-level=")) {
+ char* end;
+ api_level_ = strtoul(option.substr(strlen("--api-level=")).data(), &end, 10);
} else {
return kParseUnknownArgument;
}
@@ -134,7 +137,7 @@
return kParseOk;
}
- virtual std::string GetUsage() const {
+ std::string GetUsage() const override {
std::string usage;
usage +=
@@ -146,6 +149,7 @@
" --verbose: use verbose verifier mode.\n"
" --verbose-debug: use verbose verifier debug mode.\n"
" --repetitions=<count>: repeat the verification count times.\n"
+ " --api-level=<level>: use API level for verification.\n"
"\n";
usage += Base::GetUsage();
@@ -162,6 +166,8 @@
bool method_verifier_verbose_debug_ = false;
size_t repetitions_ = 0u;
+
+ uint32_t api_level_ = 0u;
};
struct MethodVerifierMain : public CmdlineMain<MethodVerifierArgs> {
@@ -241,6 +247,7 @@
runtime->GetCompilerCallbacks(),
true,
verifier::HardFailLogMode::kLogWarning,
+ args_->api_level_,
&error_msg);
if (args_->repetitions_ == 0) {
LOG(INFO) << descriptor << ": " << res << " " << error_msg;
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 8305051..3d70087 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -68,15 +68,15 @@
make_command+=" dx-tests"
mode_suffix="-host"
elif [[ $mode == "target" ]]; then
- if [[ -z "$TARGET_PRODUCT" ]]; then
- echo 'TARGET_PRODUCT environment variable is empty; did you forget to run `lunch`?'
+ if [[ -z "${ANDROID_PRODUCT_OUT}" ]]; then
+ echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
exit 1
fi
make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh"
make_command+=" debuggerd su"
- make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
- make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
+ make_command+=" libstdc++ "
+ make_command+=" ${ANDROID_PRODUCT_OUT#"${ANDROID_BUILD_TOP}/"}/system/etc/public.libraries.txt"
if [[ -n "$ART_TEST_CHROOT" ]]; then
# These targets are needed for the chroot environment.
make_command+=" crash_dump event-log-tags"
diff --git a/tools/buildbot-sync.sh b/tools/buildbot-sync.sh
new file mode 100755
index 0000000..01b3c0d
--- /dev/null
+++ b/tools/buildbot-sync.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+adb wait-for-device
+
+if [[ -z "${ANDROID_PRODUCT_OUT}" ]]; then
+ echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
+ exit 1
+fi
+
+if [[ -z "${ART_TEST_CHROOT}" ]]; then
+ echo 'ART_TEST_CHROOT environment variable is empty'
+ exit 1
+fi
+
+adb push ${ANDROID_PRODUCT_OUT}/system ${ART_TEST_CHROOT}/
+adb push ${ANDROID_PRODUCT_OUT}/data ${ART_TEST_CHROOT}/
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
index b805b30..57ccbdc 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -77,6 +77,9 @@
mStatus.debug("Member has annotation %s for which we have a handler",
a.getAnnotationType());
mAnnotationHandlers.get(a.getAnnotationType()).handleAnnotation(a, context);
+ } else {
+ mStatus.debug("Member has annotation %s for which we do not have a handler",
+ a.getAnnotationType());
}
}
}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 9262076..870f85a 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -17,7 +17,9 @@
package com.android.class2greylist;
import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
@@ -33,12 +35,15 @@
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.function.Predicate;
/**
* Build time tool for extracting a list of members from jar files that have the @UsedByApps
@@ -46,16 +51,20 @@
*/
public class Class2Greylist {
- private static final String GREYLIST_ANNOTATION = "Landroid/annotation/UnsupportedAppUsage;";
+ private static final Set<String> GREYLIST_ANNOTATIONS =
+ ImmutableSet.of(
+ "Landroid/annotation/UnsupportedAppUsage;",
+ "Ldalvik/annotation/compat/UnsupportedAppUsage;");
private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
private final Status mStatus;
private final String mPublicApiListFile;
private final String[] mPerSdkOutputFiles;
private final String mWhitelistFile;
+ private final String mCsvMetadataFile;
private final String[] mJarFiles;
private final GreylistConsumer mOutput;
- private final Set<Integer> mAllowedSdkVersions;
+ private final Predicate<Integer> mAllowedSdkVersions;
private final Set<String> mPublicApis;
@@ -71,9 +80,10 @@
.hasArgs()
.withDescription(
"Specify file to write greylist to. Can be specified multiple times. " +
- "Format is either just a filename, or \"int:filename\". If an integer is " +
- "given, members with a matching maxTargetSdk are written to the file; if " +
- "no integer is given, members with no maxTargetSdk are written.")
+ "Format is either just a filename, or \"int[,int,...]:filename\". If " +
+ "integers are given, members with matching maxTargetSdk values are " +
+ "written to the file; if no integer or \"none\" is given, members with " +
+ "no maxTargetSdk are written.")
.create("g"));
options.addOption(OptionBuilder
.withLongOpt("write-whitelist")
@@ -92,10 +102,17 @@
.hasArgs(0)
.create('m'));
options.addOption(OptionBuilder
+ .withLongOpt("write-metadata-csv")
+ .hasArgs(1)
+ .withDescription("Specify a file to write API metaadata to. This is a CSV file " +
+ "containing any annotation properties for all members. Do not use in " +
+ "conjunction with --write-greylist or --write-whitelist.")
+ .create('c'));
+ options.addOption(OptionBuilder
.withLongOpt("help")
.hasArgs(0)
.withDescription("Show this help")
- .create("h"));
+ .create('h'));
CommandLineParser parser = new GnuParser();
CommandLine cmd;
@@ -129,6 +146,7 @@
cmd.getOptionValue('p', null),
cmd.getOptionValues('g'),
cmd.getOptionValue('w', null),
+ cmd.getOptionValue('c', null),
jarFiles);
c2gl.main();
} catch (IOException e) {
@@ -146,22 +164,33 @@
@VisibleForTesting
Class2Greylist(Status status, String publicApiListFile, String[] perSdkLevelOutputFiles,
- String whitelistOutputFile, String[] jarFiles) throws IOException {
+ String whitelistOutputFile, String csvMetadataFile, String[] jarFiles)
+ throws IOException {
mStatus = status;
mPublicApiListFile = publicApiListFile;
mPerSdkOutputFiles = perSdkLevelOutputFiles;
mWhitelistFile = whitelistOutputFile;
+ mCsvMetadataFile = csvMetadataFile;
mJarFiles = jarFiles;
- if (mPerSdkOutputFiles != null) {
+ if (mCsvMetadataFile != null) {
+ mOutput = new CsvGreylistConsumer(mStatus, mCsvMetadataFile);
+ mAllowedSdkVersions = x -> true;
+ } else {
Map<Integer, String> outputFiles = readGreylistMap(mStatus, mPerSdkOutputFiles);
mOutput = new FileWritingGreylistConsumer(mStatus, outputFiles, mWhitelistFile);
- mAllowedSdkVersions = outputFiles.keySet();
- } else {
- // TODO remove this once per-SDK greylist support integrated into the build.
- // Right now, mPerSdkOutputFiles is always null as the build never passes the
- // corresponding command lind flags. Once the build is updated, can remove this.
- mOutput = new SystemOutGreylistConsumer();
- mAllowedSdkVersions = new HashSet<>(Arrays.asList(null, 26, 28));
+ mAllowedSdkVersions = new Predicate<Integer>(){
+ @Override
+ public boolean test(Integer i) {
+ return outputFiles.keySet().contains(i);
+ }
+
+ @Override
+ public String toString() {
+ // we rely on this toString behaviour for readable error messages in
+ // GreylistAnnotationHandler
+ return Joiner.on(",").join(outputFiles.keySet());
+ }
+ };
}
if (mPublicApiListFile != null) {
@@ -173,10 +202,11 @@
}
private Map<String, AnnotationHandler> createAnnotationHandlers() {
- return ImmutableMap.<String, AnnotationHandler>builder()
- .put(GreylistAnnotationHandler.ANNOTATION_NAME,
- new GreylistAnnotationHandler(
- mStatus, mOutput, mPublicApis, mAllowedSdkVersions))
+ Builder<String, AnnotationHandler> builder = ImmutableMap.builder();
+ GreylistAnnotationHandler greylistAnnotationHandler = new GreylistAnnotationHandler(
+ mStatus, mOutput, mPublicApis, mAllowedSdkVersions);
+ GREYLIST_ANNOTATIONS.forEach(a -> builder.put(a, greylistAnnotationHandler));
+ return builder
.put(CovariantReturnTypeHandler.ANNOTATION_NAME,
new CovariantReturnTypeHandler(mOutput, mPublicApis))
.put(CovariantReturnTypeMultiHandler.ANNOTATION_NAME,
@@ -204,15 +234,22 @@
static Map<Integer, String> readGreylistMap(Status status, String[] argValues) {
Map<Integer, String> map = new HashMap<>();
for (String sdkFile : argValues) {
- Integer maxTargetSdk = null;
+ List<Integer> maxTargetSdks = new ArrayList<>();
String filename;
int colonPos = sdkFile.indexOf(':');
if (colonPos != -1) {
- try {
- maxTargetSdk = Integer.valueOf(sdkFile.substring(0, colonPos));
- } catch (NumberFormatException nfe) {
- status.error("Not a valid integer: %s from argument value '%s'",
- sdkFile.substring(0, colonPos), sdkFile);
+ String[] targets = sdkFile.substring(0, colonPos).split(",");
+ for (String target : targets) {
+ if ("none".equals(target)) {
+ maxTargetSdks.add(null);
+ } else {
+ try {
+ maxTargetSdks.add(Integer.valueOf(target));
+ } catch (NumberFormatException nfe) {
+ status.error("Not a valid integer: %s from argument value '%s'",
+ sdkFile.substring(0, colonPos), sdkFile);
+ }
+ }
}
filename = sdkFile.substring(colonPos + 1);
if (filename.length() == 0) {
@@ -220,13 +257,16 @@
filename, sdkFile);
}
} else {
- maxTargetSdk = null;
+ maxTargetSdks.add(null);
filename = sdkFile;
}
- if (map.containsKey(maxTargetSdk)) {
- status.error("Multiple output files for maxTargetSdk %s", maxTargetSdk);
- } else {
- map.put(maxTargetSdk, filename);
+ for (Integer maxTargetSdk : maxTargetSdks) {
+ if (map.containsKey(maxTargetSdk)) {
+ status.error("Multiple output files for maxTargetSdk %s",
+ maxTargetSdk == null ? "none" : maxTargetSdk);
+ } else {
+ map.put(maxTargetSdk, filename);
+ }
}
}
return map;
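The extended "int[,int,...]:filename" syntax, including the "none" keyword, behaves as exercised by Class2GreylistTest further below; a sketch (the Status instance is assumed, e.g. a mock as in the tests):

    // "none" stands for a null maxTargetSdk; several SDK levels may share one output file.
    Map<Integer, String> map = Class2Greylist.readGreylistMap(
        status, new String[]{"1,none:noOr1Api", "3:apiThree"});
    // map == {null=noOr1Api, 1=noOr1Api, 3=apiThree}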
diff --git a/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java
new file mode 100644
index 0000000..7d28b31
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CsvGreylistConsumer.java
@@ -0,0 +1,35 @@
+package com.android.class2greylist;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.PrintStream;
+import java.util.Map;
+
+public class CsvGreylistConsumer implements GreylistConsumer {
+
+ private final Status mStatus;
+ private final CsvWriter mCsvWriter;
+
+ public CsvGreylistConsumer(Status status, String csvMetadataFile) throws FileNotFoundException {
+ mStatus = status;
+ mCsvWriter = new CsvWriter(
+ new PrintStream(new FileOutputStream(new File(csvMetadataFile))));
+ }
+
+ @Override
+ public void greylistEntry(String signature, Integer maxTargetSdk,
+ Map<String, String> annotationProperties) {
+ annotationProperties.put("signature", signature);
+ mCsvWriter.addRow(annotationProperties);
+ }
+
+ @Override
+ public void whitelistEntry(String signature) {
+ }
+
+ @Override
+ public void close() {
+ mCsvWriter.close();
+ }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java b/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java
new file mode 100644
index 0000000..3cfec30
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/CsvWriter.java
@@ -0,0 +1,49 @@
+package com.android.class2greylist;
+
+import com.google.common.base.Joiner;
+
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Helper class for writing data to a CSV file.
+ *
+ * This class does not write anything to its output until it is closed, so it can gather a set of
+ * all columns before writing the header row.
+ */
+public class CsvWriter {
+
+ private final PrintStream mOutput;
+ private final ArrayList<Map<String, String>> mContents;
+ private final Set<String> mColumns;
+
+ public CsvWriter(PrintStream out) {
+ mOutput = out;
+ mContents = new ArrayList<>();
+ mColumns = new HashSet<>();
+ }
+
+ public void addRow(Map<String, String> values) {
+ mColumns.addAll(values.keySet());
+ mContents.add(values);
+ }
+
+ public void close() {
+ List<String> columns = new ArrayList<>(mColumns);
+ columns.sort(Comparator.naturalOrder());
+ mOutput.println(columns.stream().collect(Collectors.joining(",")));
+ for (Map<String, String> row : mContents) {
+ mOutput.println(columns.stream().map(column -> row.getOrDefault(column, "")).collect(
+ Collectors.joining(",")));
+ }
+ mOutput.close();
+ }
+
+
+}
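A minimal sketch of how CsvWriter lays out its output (the file name and row values are illustrative only):

    CsvWriter writer = new CsvWriter(new PrintStream(new FileOutputStream("members.csv")));
    writer.addRow(ImmutableMap.of("signature", "La/b/C;->m()V", "maxTargetSdk", "28"));
    writer.addRow(ImmutableMap.of("signature", "La/b/C;->f:I", "trackingBug", "12345"));
    writer.close();
    // The header is the sorted union of all columns seen; missing cells are left empty:
    //   maxTargetSdk,signature,trackingBug
    //   28,La/b/C;->m()V,
    //   ,La/b/C;->f:I,12345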
diff --git a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
index 9f33467..b3ed1b1 100644
--- a/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/FileWritingGreylistConsumer.java
@@ -1,5 +1,7 @@
package com.android.class2greylist;
+import com.google.common.annotations.VisibleForTesting;
+
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
@@ -20,11 +22,16 @@
return new PrintStream(new FileOutputStream(new File(filename)));
}
- private static Map<Integer, PrintStream> openFiles(
+ @VisibleForTesting
+ public static Map<Integer, PrintStream> openFiles(
Map<Integer, String> filenames) throws FileNotFoundException {
+ Map<String, PrintStream> streamsByName = new HashMap<>();
Map<Integer, PrintStream> streams = new HashMap<>();
for (Map.Entry<Integer, String> entry : filenames.entrySet()) {
- streams.put(entry.getKey(), openFile(entry.getValue()));
+ if (!streamsByName.containsKey(entry.getValue())) {
+ streamsByName.put(entry.getValue(), openFile(entry.getValue()));
+ }
+ streams.put(entry.getKey(), streamsByName.get(entry.getValue()));
}
return streams;
}
@@ -37,7 +44,8 @@
}
@Override
- public void greylistEntry(String signature, Integer maxTargetSdk) {
+ public void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationProperties) {
PrintStream p = mSdkToPrintStreamMap.get(maxTargetSdk);
if (p == null) {
mStatus.error("No output file for signature %s with maxTargetSdk of %d", signature,
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
index 460f2c3..72c0ea4 100644
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistAnnotationHandler.java
@@ -11,6 +11,8 @@
import org.apache.bcel.classfile.Method;
import org.apache.bcel.classfile.SimpleElementValue;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
@@ -27,8 +29,6 @@
*/
public class GreylistAnnotationHandler implements AnnotationHandler {
- public static final String ANNOTATION_NAME = "Landroid/annotation/UnsupportedAppUsage;";
-
// properties of greylist annotations:
private static final String EXPECTED_SIGNATURE = "expectedSignature";
private static final String MAX_TARGET_SDK = "maxTargetSdk";
@@ -36,7 +36,7 @@
private final Status mStatus;
private final Predicate<GreylistMember> mGreylistFilter;
private final GreylistConsumer mGreylistConsumer;
- private final Set<Integer> mValidMaxTargetSdkValues;
+ private final Predicate<Integer> mValidMaxTargetSdkValues;
/**
* Represents a member of a class file (a field or method).
@@ -73,7 +73,7 @@
Status status,
GreylistConsumer greylistConsumer,
Set<String> publicApis,
- Set<Integer> validMaxTargetSdkValues) {
+ Predicate<Integer> validMaxTargetSdkValues) {
this(status, greylistConsumer,
member -> !(member.bridge && publicApis.contains(member.signature)),
validMaxTargetSdkValues);
@@ -84,7 +84,7 @@
Status status,
GreylistConsumer greylistConsumer,
Predicate<GreylistMember> greylistFilter,
- Set<Integer> validMaxTargetSdkValues) {
+ Predicate<Integer> validMaxTargetSdkValues) {
mStatus = status;
mGreylistConsumer = greylistConsumer;
mGreylistFilter = greylistFilter;
@@ -101,6 +101,7 @@
}
String signature = context.getMemberDescriptor();
Integer maxTargetSdk = null;
+ Map<String, String> allValues = new HashMap<String, String>();
for (ElementValuePair property : annotation.getElementValuePairs()) {
switch (property.getNameString()) {
case EXPECTED_SIGNATURE:
@@ -110,9 +111,10 @@
maxTargetSdk = verifyAndGetMaxTargetSdk(context, property);
break;
}
+ allValues.put(property.getNameString(), property.getValue().stringifyValue());
}
if (mGreylistFilter.test(new GreylistMember(signature, bridge, maxTargetSdk))) {
- mGreylistConsumer.greylistEntry(signature, maxTargetSdk);
+ mGreylistConsumer.greylistEntry(signature, maxTargetSdk, allValues);
}
}
@@ -131,13 +133,14 @@
if (property.getValue().getElementValueType() != ElementValue.PRIMITIVE_INT) {
context.reportError("Expected property %s to be of type int; got %d",
property.getNameString(), property.getValue().getElementValueType());
+ return null;
}
int value = ((SimpleElementValue) property.getValue()).getValueInt();
- if (!mValidMaxTargetSdkValues.contains(value)) {
+ if (!mValidMaxTargetSdkValues.test(value)) {
context.reportError("Invalid value for %s: got %d, expected one of [%s]",
property.getNameString(),
value,
- Joiner.on(",").join(mValidMaxTargetSdkValues));
+ mValidMaxTargetSdkValues);
return null;
}
return value;
diff --git a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
index fd855e8..afded37 100644
--- a/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/GreylistConsumer.java
@@ -1,5 +1,7 @@
package com.android.class2greylist;
+import java.util.Map;
+
public interface GreylistConsumer {
/**
* Handle a new greylist entry.
@@ -7,7 +9,8 @@
* @param signature Signature of the member.
* @param maxTargetSdk maxTargetSdk value from the annotation, or null if none set.
*/
- void greylistEntry(String signature, Integer maxTargetSdk);
+ void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationProperties);
/**
* Handle a new whitelist entry.
diff --git a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
index ad5ad70..f86ac6e 100644
--- a/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
+++ b/tools/class2greylist/src/com/android/class2greylist/SystemOutGreylistConsumer.java
@@ -1,8 +1,11 @@
package com.android.class2greylist;
+import java.util.Map;
+
public class SystemOutGreylistConsumer implements GreylistConsumer {
@Override
- public void greylistEntry(String signature, Integer maxTargetSdk) {
+ public void greylistEntry(
+ String signature, Integer maxTargetSdk, Map<String, String> annotationValues) {
System.out.println(signature);
}
diff --git a/tools/class2greylist/test/Android.mk b/tools/class2greylist/test/Android.mk
index 23f4156..f35e74c 100644
--- a/tools/class2greylist/test/Android.mk
+++ b/tools/class2greylist/test/Android.mk
@@ -21,7 +21,7 @@
LOCAL_MODULE := class2greylisttest
-LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host
+LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host objenesis
# tag this module as a cts test artifact
LOCAL_COMPATIBILITY_SUITE := general-tests
@@ -29,4 +29,4 @@
include $(BUILD_HOST_JAVA_LIBRARY)
# Build the test APKs using their own makefiles
-include $(call all-makefiles-under,$(LOCAL_PATH))
\ No newline at end of file
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
index cb75dd3..b87a5b1 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/Class2GreylistTest.java
@@ -56,6 +56,31 @@
}
@Test
+ public void testReadGreylistMapNone() throws IOException {
+ Map<Integer, String> map = Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"none:noApi"});
+ verifyZeroInteractions(mStatus);
+ assertThat(map).containsExactly(null, "noApi");
+ }
+
+ @Test
+ public void testReadGreylistMapMulti() throws IOException {
+ Map<Integer, String> map = Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"1,none:noOr1Api", "3:apiThree"});
+ verifyZeroInteractions(mStatus);
+ assertThat(map).containsExactly(null, "noOr1Api", 1, "noOr1Api", 3, "apiThree");
+ }
+
+ @Test
+ public void testReadGreylistMapMulti2() throws IOException {
+ Map<Integer, String> map = Class2Greylist.readGreylistMap(mStatus,
+ new String[]{"1,none,2,3,4:allApi"});
+ verifyZeroInteractions(mStatus);
+ assertThat(map).containsExactly(
+ null, "allApi", 1, "allApi", 2, "allApi", 3, "allApi", 4, "allApi");
+ }
+
+ @Test
public void testReadGreylistMapDuplicate() throws IOException {
Class2Greylist.readGreylistMap(mStatus,
new String[]{"noApi", "1:apiOne", "1:anotherOne"});
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/FileWritingGreylistConsumerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/FileWritingGreylistConsumerTest.java
new file mode 100644
index 0000000..1e1b1df
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/FileWritingGreylistConsumerTest.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.mockito.Mock;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+public class FileWritingGreylistConsumerTest {
+
+ @Mock
+ Status mStatus;
+ @Rule
+ public TestName mTestName = new TestName();
+ private int mFileNameSeq = 0;
+ private final List<String> mTempFiles = new ArrayList<>();
+
+ @Before
+ public void setup() throws IOException {
+ System.out.println(String.format("\n============== STARTING TEST: %s ==============\n",
+ mTestName.getMethodName()));
+ initMocks(this);
+ }
+
+ @After
+ public void removeTempFiles() {
+ for (String name : mTempFiles) {
+ new File(name).delete();
+ }
+ }
+
+ private String tempFileName() {
+ String name = String.format(Locale.US, "/tmp/test-%s-%d",
+ mTestName.getMethodName(), mFileNameSeq++);
+ mTempFiles.add(name);
+ return name;
+ }
+
+ @Test
+ public void testSimpleMap() throws FileNotFoundException {
+ Map<Integer, PrintStream> streams = FileWritingGreylistConsumer.openFiles(
+ ImmutableMap.of(1, tempFileName(), 2, tempFileName()));
+ assertThat(streams.keySet()).containsExactly(1, 2);
+ assertThat(streams.get(1)).isNotNull();
+ assertThat(streams.get(2)).isNotNull();
+ assertThat(streams.get(2)).isNotSameAs(streams.get(1));
+ }
+
+ @Test
+ public void testCommonMappings() throws FileNotFoundException {
+ String name = tempFileName();
+ Map<Integer, PrintStream> streams = FileWritingGreylistConsumer.openFiles(
+ ImmutableMap.of(1, name, 2, name));
+ assertThat(streams.keySet()).containsExactly(1, 2);
+ assertThat(streams.get(1)).isNotNull();
+ assertThat(streams.get(2)).isSameAs(streams.get(1));
+ }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
index 1a4bfb8..edf2ecd 100644
--- a/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
+++ b/tools/class2greylist/test/src/com/android/class2greylist/GreylistAnnotationHandlerTest.java
@@ -60,7 +60,7 @@
Predicate<GreylistAnnotationHandler.GreylistMember> greylistFilter,
Set<Integer> validMaxTargetSdkValues) {
return new GreylistAnnotationHandler(
- mStatus, mConsumer, greylistFilter, validMaxTargetSdkValues);
+ mStatus, mConsumer, greylistFilter, x -> validMaxTargetSdkValues.contains(x));
}
@Test
@@ -80,7 +80,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -101,7 +101,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
}
@@ -122,7 +122,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
}
@@ -143,7 +143,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
}
@@ -184,7 +184,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
}
@@ -202,7 +202,7 @@
).visit();
assertNoErrors();
- verify(mConsumer, never()).greylistEntry(any(String.class), any());
+ verify(mConsumer, never()).greylistEntry(any(String.class), any(), any());
}
@Test
@@ -222,7 +222,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -252,7 +252,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -284,7 +284,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Class;->method(Ljava/lang/String;)V");
@@ -322,7 +322,7 @@
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// A bridge method is generated for the above, so we expect 2 greylist entries.
- verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(2)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getAllValues()).containsExactly(
"La/b/Class;->method(Ljava/lang/Object;)V",
"La/b/Base;->method(Ljava/lang/Object;)V");
@@ -355,14 +355,14 @@
mStatus,
mConsumer,
publicApis,
- emptySet()));
+ x -> false));
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Base"), mStatus, handlerMap).visit();
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
// The bridge method generated for the above, is a public API so should be excluded
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method(Ljava/lang/String;)V");
}
@@ -384,7 +384,7 @@
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
- verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any());
+ verify(mConsumer, times(1)).greylistEntry(greylist.capture(), any(), any());
assertThat(greylist.getValue()).isEqualTo("La/b/Class;->field:I");
}
@@ -423,7 +423,7 @@
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
- verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture(), any());
assertThat(maxTargetSdk.getValue()).isEqualTo(1);
}
@@ -445,7 +445,7 @@
new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus, handlerMap).visit();
assertNoErrors();
ArgumentCaptor<Integer> maxTargetSdk = ArgumentCaptor.forClass(Integer.class);
- verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture());
+ verify(mConsumer, times(1)).greylistEntry(any(), maxTargetSdk.capture(), any());
assertThat(maxTargetSdk.getValue()).isEqualTo(null);
}
@@ -468,4 +468,37 @@
verify(mStatus, times(1)).error(any(), any());
}
+ @Test
+ public void testAnnotationPropertiesIntoMap() throws IOException {
+ mJavac.addSource("annotation.Anno2", Joiner.on('\n').join(
+ "package annotation;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
+ "@Retention(CLASS)",
+ "public @interface Anno2 {",
+ " String expectedSignature() default \"\";",
+ " int maxTargetSdk() default Integer.MAX_VALUE;",
+ " long trackingBug() default 0;",
+ "}"));
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno2;",
+ "public class Class {",
+ " @Anno2(maxTargetSdk=2, trackingBug=123456789)",
+ " public int field;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), mStatus,
+ ImmutableMap.of("Lannotation/Anno2;", createGreylistHandler(x -> true,
+ ImmutableSet.of(2)))
+ ).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<Map<String, String>> properties = ArgumentCaptor.forClass(Map.class);
+ verify(mConsumer, times(1)).greylistEntry(any(), any(), properties.capture());
+ assertThat(properties.getValue()).containsExactly(
+ "maxTargetSdk", "2",
+ "trackingBug", "123456789");
+ }
+
}
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index 23cc917..027f128 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -14,16 +14,11 @@
// limitations under the License.
//
-// Build a "data" binary which will hold all the symbol values that will be parsed by the other scripts.
-//
-// Builds are for host only, target-specific define generation is possibly but is trickier and would need extra tooling.
-//
-// In the future we may wish to parameterize this on (32,64)x(read_barrier,no_read_barrier).
-
-cc_binary { // Do not use art_cc_binary because HOST_PREFER_32_BIT is incompatible with genrule.
- name: "cpp-define-generator-data",
+// This produces human-readable asm_defines.s with the embedded compile-time constants.
+cc_object {
+ name: "asm_defines.s",
host_supported: true,
- device_supported: false,
+ device_supported: true,
defaults: [
"art_debug_defaults",
"art_defaults",
@@ -33,20 +28,36 @@
"art/libdexfile",
"art/libartbase",
"art/runtime",
+ "system/core/base/include",
],
- srcs: ["main.cc"],
- shared_libs: [
- "libbase",
- ],
+    // Produce a text file rather than a binary.
+ cflags: ["-S"],
+ srcs: ["asm_defines.cc"],
}
-// Note: See $OUT_DIR/soong/build.ninja
-// For the exact filename that this generates to run make command on just
-// this rule later.
-genrule {
+// This extracts the compile-time constants from asm_defines.s and creates the header.
+cc_genrule {
name: "cpp-define-generator-asm-support",
- out: ["asm_support_gen.h"],
- tools: ["cpp-define-generator-data"],
- tool_files: ["verify-asm-support"],
- cmd: "$(location verify-asm-support) --quiet \"$(location cpp-define-generator-data)\" \"$(out)\"",
+ host_supported: true,
+ device_supported: true,
+ srcs: [":asm_defines.s"],
+ out: ["asm_defines.h"],
+ tool_files: ["make_header.py"],
+ cmd: "$(location make_header.py) \"$(in)\" > \"$(out)\"",
+}
+
+cc_library_headers {
+ name: "cpp-define-generator-definitions",
+ host_supported: true,
+ export_include_dirs: ["."],
+}
+
+python_binary_host {
+ name: "cpp-define-generator-test",
+ main: "make_header_test.py",
+ srcs: [
+ "make_header.py",
+ "make_header_test.py",
+ ],
+ test_suites: ["general-tests"],
}
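Note: the cc_genrule above ultimately emits a header made of plain #define lines, hex-formatted by make_header.py. A minimal sketch of what the generated asm_defines.h looks like, reusing the sample constants from make_header_test.py further down; actual values vary per build and architecture:

  #define JIT_CHECK_OSR -0x1
  #define MIRROR_OBJECT_LOCK_WORD_OFFSET 0x4
  #define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8

Assembly sources can then include this header and refer to the constants directly.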
diff --git a/tools/cpp-define-generator/art_method.def b/tools/cpp-define-generator/art_method.def
new file mode 100644
index 0000000..21859dc
--- /dev/null
+++ b/tools/cpp-define-generator/art_method.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "art_method.h"
+#endif
+
+ASM_DEFINE(ART_METHOD_ACCESS_FLAGS_OFFSET,
+ art::ArtMethod::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_DECLARING_CLASS_OFFSET,
+ art::ArtMethod::DeclaringClassOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_JNI_OFFSET_32,
+ art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(ART_METHOD_JNI_OFFSET_64,
+ art::ArtMethod::EntryPointFromJniOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_32,
+ art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_64,
+ art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
diff --git a/tools/cpp-define-generator/asm_defines.cc b/tools/cpp-define-generator/asm_defines.cc
new file mode 100644
index 0000000..b79e1ae
--- /dev/null
+++ b/tools/cpp-define-generator/asm_defines.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// This file is used to generate #defines for use in assembly source code.
+//
+// The content of this file will be used to compile an object file
+// (generated as a human-readable assembly text file, not as a binary).
+// This text file will then be post-processed by a Python script to find
+// and extract the constants and generate the final asm_defines.h header.
+//
+
+// We use "asm volatile" to generate text that will stand out in the
+// compiler-generated intermediate assembly file (e.g. ">>FOO 42 0<<").
+// We emit all values as 64-bit integers (which will be printed as text).
+// We also store a flag which specifies whether the constant is negative.
+// Note that "asm volatile" must be inside a method to please the compiler.
+#define ASM_DEFINE(NAME, EXPR) \
+void AsmDefineHelperFor_##NAME() { \
+ asm volatile("\n.ascii \">>" #NAME " %0 %1<<\"" \
+ :: "i" (static_cast<int64_t>(EXPR)), "i" ((EXPR) < 0 ? 1 : 0)); \
+}
+#include "asm_defines.def"
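Note: as a concrete illustration of the macro above (a sketch only), ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET, art::mirror::Object::MonitorOffset().Int32Value()), as declared in mirror_object.def, expands to roughly:

  // Expansion of ASM_DEFINE for one constant; the function body only exists to
  // carry the asm block, it is never meant to be called.
  void AsmDefineHelperFor_MIRROR_OBJECT_LOCK_WORD_OFFSET() {
    asm volatile("\n.ascii \">>MIRROR_OBJECT_LOCK_WORD_OFFSET %0 %1<<\""
        :: "i" (static_cast<int64_t>(art::mirror::Object::MonitorOffset().Int32Value())),
           "i" ((art::mirror::Object::MonitorOffset().Int32Value()) < 0 ? 1 : 0));
  }

Compiling this with -S yields an .ascii directive of the form >>MIRROR_OBJECT_LOCK_WORD_OFFSET #4 #0<< in asm_defines.s (the immediate prefix, '#' or '$', depends on the target architecture, which is why make_header.py accepts both), and that is exactly the pattern the script scans for.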
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
new file mode 100644
index 0000000..7a77e8e
--- /dev/null
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if !defined(ASM_DEFINE_INCLUDE_DEPENDENCIES)
+#define ASM_DEFINE_INCLUDE_DEPENDENCIES 1
+#endif
+
+#include "globals.def"
+#include "art_method.def"
+#include "lockword.def"
+#include "mirror_array.def"
+#include "mirror_class.def"
+#include "mirror_dex_cache.def"
+#include "mirror_object.def"
+#include "mirror_string.def"
+#include "rosalloc.def"
+#include "runtime.def"
+#include "shadow_frame.def"
+#include "thread.def"
diff --git a/tools/cpp-define-generator/common.def b/tools/cpp-define-generator/common.def
deleted file mode 100644
index 76c64c9..0000000
--- a/tools/cpp-define-generator/common.def
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Convenience macro to define an offset expression.
-
-#ifndef DEFINE_OFFSET_EXPR
-#define DEFINE_OFFSET_EXPR(holder_type, field_name, field_type, expr) \
- DEFINE_EXPR(holder_type ## _ ## field_name ## _OFFSET, field_type, expr)
-#define DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#endif
-
diff --git a/tools/cpp-define-generator/common_undef.def b/tools/cpp-define-generator/common_undef.def
deleted file mode 100644
index c44aba7..0000000
--- a/tools/cpp-define-generator/common_undef.def
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifdef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#undef DEFINE_OFFSET_EXPR_STANDARD_DEFINITION
-#undef DEFINE_OFFSET_EXPR
-#endif
diff --git a/tools/cpp-define-generator/constant_card_table.def b/tools/cpp-define-generator/constant_card_table.def
deleted file mode 100644
index ae3e8f3..0000000
--- a/tools/cpp-define-generator/constant_card_table.def
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export heap values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/accounting/card_table.h"
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(CARD_TABLE_CARD_SHIFT, size_t, art::gc::accounting::CardTable::kCardShift)
-
diff --git a/tools/cpp-define-generator/constant_class.def b/tools/cpp-define-generator/constant_class.def
deleted file mode 100644
index 1310103..0000000
--- a/tools/cpp-define-generator/constant_class.def
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/bit_utils.h" // MostSignificantBit
-#include "dex/modifiers.h" // kAccClassIsFinalizable
-#endif
-
-#define DEFINE_FLAG_OFFSET(type_name, field_name, expr) \
- DEFINE_EXPR(type_name ## _ ## field_name, uint32_t, (expr))
-
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE, art::kAccClassIsFinalizable)
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_INTERFACE, art::kAccInterface)
-// TODO: We should really have a BitPosition which also checks it's a power of 2.
-DEFINE_FLAG_OFFSET(ACCESS_FLAGS, CLASS_IS_FINALIZABLE_BIT, art::MostSignificantBit(art::kAccClassIsFinalizable))
-
-#undef DEFINE_FLAG_OFFSET
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
deleted file mode 100644
index 743ebb7..0000000
--- a/tools/cpp-define-generator/constant_dexcache.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/dex_cache.h" // art::mirror::DexCache, StringDexCachePair
-#endif
-
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT, int32_t,
- art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
-DEFINE_EXPR(STRING_DEX_CACHE_SIZE_MINUS_ONE, int32_t,
- art::mirror::DexCache::kDexCacheStringCacheSize - 1)
-DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS, int32_t,
- art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE, int32_t,
- sizeof(art::mirror::StringDexCachePair))
-DEFINE_EXPR(METHOD_DEX_CACHE_SIZE_MINUS_ONE, int32_t,
- art::mirror::DexCache::kDexCacheMethodCacheSize - 1)
-DEFINE_EXPR(METHOD_DEX_CACHE_HASH_BITS, int32_t,
- art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))
diff --git a/tools/cpp-define-generator/constant_globals.def b/tools/cpp-define-generator/constant_globals.def
deleted file mode 100644
index d0d6350..0000000
--- a/tools/cpp-define-generator/constant_globals.def
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export global values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include <atomic> // std::memory_order_relaxed
-#include "base/globals.h" // art::kObjectAlignment
-#include "dex/modifiers.h"
-#endif
-
-DEFINE_EXPR(STD_MEMORY_ORDER_RELAXED, int32_t, std::memory_order_relaxed)
-
-#define DEFINE_OBJECT_EXPR(macro_name, type, constant_field_name) \
- DEFINE_EXPR(OBJECT_ ## macro_name, type, constant_field_name)
-
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK, size_t, art::kObjectAlignment - 1)
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED, uint32_t, ~static_cast<uint32_t>(art::kObjectAlignment - 1))
-DEFINE_OBJECT_EXPR(ALIGNMENT_MASK_TOGGLED64, uint64_t, ~static_cast<uint64_t>(art::kObjectAlignment - 1))
-
-DEFINE_EXPR(ACC_OBSOLETE_METHOD, int32_t, art::kAccObsoleteMethod)
-DEFINE_EXPR(ACC_OBSOLETE_METHOD_SHIFT, int32_t, art::WhichPowerOf2(art::kAccObsoleteMethod))
-
-#undef DEFINE_OBJECT_EXPR
-
diff --git a/tools/cpp-define-generator/constant_heap.def b/tools/cpp-define-generator/constant_heap.def
deleted file mode 100644
index dc76736..0000000
--- a/tools/cpp-define-generator/constant_heap.def
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export heap values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/heap.h"
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(MIN_LARGE_OBJECT_THRESHOLD, size_t, art::gc::Heap::kMinLargeObjectThreshold)
-
diff --git a/tools/cpp-define-generator/constant_jit.def b/tools/cpp-define-generator/constant_jit.def
deleted file mode 100644
index 5fa5194..0000000
--- a/tools/cpp-define-generator/constant_jit.def
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within jit.h.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "jit/jit.h" // art::kSuspendRequest, etc.
-#endif
-
-#define DEFINE_JIT_CONSTANT(macro_name, type, expr) \
- DEFINE_EXPR(JIT_ ## macro_name, type, (expr))
-
-DEFINE_JIT_CONSTANT(CHECK_OSR, int16_t, art::jit::kJitCheckForOSR)
-DEFINE_JIT_CONSTANT(HOTNESS_DISABLE, int16_t, art::jit::kJitHotnessDisabled)
-
-#undef DEFINE_JIT_CONSTANT
diff --git a/tools/cpp-define-generator/constant_lockword.def b/tools/cpp-define-generator/constant_lockword.def
deleted file mode 100644
index 977d1ca..0000000
--- a/tools/cpp-define-generator/constant_lockword.def
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Export lockword values.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "lock_word.h" // art::LockWord
-#endif
-
-#define DEFINE_LOCK_WORD_EXPR(macro_name, type, constant_field_name) \
- DEFINE_EXPR(LOCK_WORD_ ## macro_name, type, art::LockWord::constant_field_name)
-
-// FIXME: The naming is inconsistent, the `Shifted` -> `_SHIFTED` suffix is sometimes missing.
-DEFINE_LOCK_WORD_EXPR(STATE_SHIFT, int32_t, kStateShift)
-DEFINE_LOCK_WORD_EXPR(STATE_MASK_SHIFTED, uint32_t, kStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_SHIFT, int32_t, kReadBarrierStateShift)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK, uint32_t, kReadBarrierStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(READ_BARRIER_STATE_MASK_TOGGLED, uint32_t, kReadBarrierStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SIZE, int32_t, kThinLockCountSize)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_SHIFT, int32_t, kThinLockCountShift)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_MASK_SHIFTED, uint32_t, kThinLockCountMaskShifted)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_COUNT_ONE, uint32_t, kThinLockCountOne)
-DEFINE_LOCK_WORD_EXPR(THIN_LOCK_OWNER_MASK_SHIFTED, uint32_t, kThinLockOwnerMaskShifted)
-
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS, uint32_t, kStateForwardingAddress)
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_OVERFLOW, uint32_t, kStateForwardingAddressOverflow)
-DEFINE_LOCK_WORD_EXPR(STATE_FORWARDING_ADDRESS_SHIFT, uint32_t, kForwardingAddressShift)
-
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED, uint32_t, kGCStateMaskShifted)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_MASK_SHIFTED_TOGGLED, uint32_t, kGCStateMaskShiftedToggled)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SIZE, int32_t, kGCStateSize)
-DEFINE_LOCK_WORD_EXPR(GC_STATE_SHIFT, int32_t, kGCStateShift)
-
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_SHIFT, int32_t, kMarkBitStateShift)
-DEFINE_LOCK_WORD_EXPR(MARK_BIT_MASK_SHIFTED, uint32_t, kMarkBitStateMaskShifted)
-
-#undef DEFINE_LOCK_WORD_EXPR
-
diff --git a/tools/cpp-define-generator/constant_reference.def b/tools/cpp-define-generator/constant_reference.def
deleted file mode 100644
index d312f76..0000000
--- a/tools/cpp-define-generator/constant_reference.def
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/object.h" // mirror::Object
-#include "stack.h" // StackReference
-#include "mirror/object_reference.h" // mirror::CompressedReference
-#include "base/bit_utils.h" // WhichPowerOf2
-#endif
-
-// Size of references to the heap on the stack.
-DEFINE_EXPR(STACK_REFERENCE_SIZE, size_t, sizeof(art::StackReference<art::mirror::Object>))
-// Size of heap references
-DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE, size_t, sizeof(art::mirror::CompressedReference<art::mirror::Object>))
-DEFINE_EXPR(COMPRESSED_REFERENCE_SIZE_SHIFT, size_t, art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))
-
-#undef DEFINE_REFERENCE_OFFSET
diff --git a/tools/cpp-define-generator/constant_rosalloc.def b/tools/cpp-define-generator/constant_rosalloc.def
deleted file mode 100644
index 2007cef..0000000
--- a/tools/cpp-define-generator/constant_rosalloc.def
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within RosAlloc.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "gc/allocator/rosalloc.h" // art::gc::allocator::RosAlloc
-#endif
-
-#define DEFINE_ROSALLOC_CONSTANT(macro_name, type, expr) \
- DEFINE_EXPR(ROSALLOC_ ## macro_name, type, (expr))
-
-DEFINE_ROSALLOC_CONSTANT(MAX_THREAD_LOCAL_BRACKET_SIZE, int32_t, art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize)
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_SHIFT, int32_t, art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift)
-// TODO: This should be a BitUtils helper, e.g. BitMaskFromSize or something like that.
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK, int32_t, static_cast<int32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED32,\
- uint32_t, ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(BRACKET_QUANTUM_SIZE_MASK_TOGGLED64,\
- uint64_t, ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListOffset())
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_HEAD_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListHeadOffset())
-DEFINE_ROSALLOC_CONSTANT(RUN_FREE_LIST_SIZE_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunFreeListSizeOffset())
-DEFINE_ROSALLOC_CONSTANT(SLOT_NEXT_OFFSET, int32_t, art::gc::allocator::RosAlloc::RunSlotNextOffset())
-
-
-#undef DEFINE_ROSALLOC_CONSTANT
diff --git a/tools/cpp-define-generator/constant_thread.def b/tools/cpp-define-generator/constant_thread.def
deleted file mode 100644
index 1364b55..0000000
--- a/tools/cpp-define-generator/constant_thread.def
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Constants within thread.h.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "thread.h" // art::kSuspendRequest, etc.
-#endif
-
-#define DEFINE_THREAD_CONSTANT(macro_name, type, expr) \
- DEFINE_EXPR(THREAD_ ## macro_name, type, (expr))
-
-DEFINE_THREAD_CONSTANT(SUSPEND_REQUEST, int32_t, art::kSuspendRequest)
-DEFINE_THREAD_CONSTANT(CHECKPOINT_REQUEST, int32_t, art::kCheckpointRequest)
-DEFINE_THREAD_CONSTANT(EMPTY_CHECKPOINT_REQUEST, int32_t, art::kEmptyCheckpointRequest)
-DEFINE_THREAD_CONSTANT(SUSPEND_OR_CHECKPOINT_REQUEST, int32_t, art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
-
-#undef DEFINE_THREAD_CONSTANT
diff --git a/tools/cpp-define-generator/generate-asm-support b/tools/cpp-define-generator/generate-asm-support
deleted file mode 100755
index fcdf72f..0000000
--- a/tools/cpp-define-generator/generate-asm-support
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Generates asm_support_gen.h
-# - This must be run after a build since it uses cpp-define-generator-data
-
-[[ -z ${ANDROID_BUILD_TOP+x} ]] && (echo "Run source build/envsetup.sh first" >&2 && exit 1)
-
-cpp-define-generator-data > ${ANDROID_BUILD_TOP}/art/runtime/generated/asm_support_gen.h
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
new file mode 100644
index 0000000..6443a0c
--- /dev/null
+++ b/tools/cpp-define-generator/globals.def
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "dex/modifiers.h"
+#include "gc/accounting/card_table.h"
+#include "gc/heap.h"
+#include "interpreter/mterp/mterp.h"
+#include "jit/jit.h"
+#include "mirror/object.h"
+#include "mirror/object_reference.h"
+#include "stack.h"
+#endif
+
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE,
+ art::kAccClassIsFinalizable)
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT,
+ art::MostSignificantBit(art::kAccClassIsFinalizable))
+ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_INTERFACE,
+ art::kAccInterface)
+ASM_DEFINE(ACC_OBSOLETE_METHOD,
+ art::kAccObsoleteMethod)
+ASM_DEFINE(ACC_OBSOLETE_METHOD_SHIFT,
+ art::WhichPowerOf2(art::kAccObsoleteMethod))
+ASM_DEFINE(CARD_TABLE_CARD_SHIFT,
+ art::gc::accounting::CardTable::kCardShift)
+ASM_DEFINE(COMPRESSED_REFERENCE_SIZE,
+ sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+ASM_DEFINE(COMPRESSED_REFERENCE_SIZE_SHIFT,
+ art::WhichPowerOf2(sizeof(art::mirror::CompressedReference<art::mirror::Object>)))
+ASM_DEFINE(JIT_CHECK_OSR,
+ art::jit::kJitCheckForOSR)
+ASM_DEFINE(JIT_HOTNESS_DISABLE,
+ art::jit::kJitHotnessDisabled)
+ASM_DEFINE(MIN_LARGE_OBJECT_THRESHOLD,
+ art::gc::Heap::kMinLargeObjectThreshold)
+ASM_DEFINE(MTERP_HANDLER_SIZE,
+ art::interpreter::kMterpHandlerSize)
+ASM_DEFINE(MTERP_HANDLER_SIZE_LOG2,
+ art::WhichPowerOf2(art::interpreter::kMterpHandlerSize))
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK,
+ art::kObjectAlignment - 1)
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED,
+ ~static_cast<uint32_t>(art::kObjectAlignment - 1))
+ASM_DEFINE(OBJECT_ALIGNMENT_MASK_TOGGLED64,
+ ~static_cast<uint64_t>(art::kObjectAlignment - 1))
+ASM_DEFINE(POINTER_SIZE,
+ static_cast<size_t>(art::kRuntimePointerSize))
+ASM_DEFINE(POINTER_SIZE_SHIFT,
+ art::WhichPowerOf2(static_cast<size_t>(art::kRuntimePointerSize)))
+ASM_DEFINE(STACK_REFERENCE_SIZE,
+ sizeof(art::StackReference<art::mirror::Object>))
+ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
+ std::memory_order_relaxed)
diff --git a/tools/cpp-define-generator/lockword.def b/tools/cpp-define-generator/lockword.def
new file mode 100644
index 0000000..a170c15
--- /dev/null
+++ b/tools/cpp-define-generator/lockword.def
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "lock_word.h"
+#endif
+
+ASM_DEFINE(LOCK_WORD_GC_STATE_MASK_SHIFTED,
+ art::LockWord::kGCStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED,
+ art::LockWord::kGCStateMaskShiftedToggled)
+ASM_DEFINE(LOCK_WORD_GC_STATE_SHIFT,
+ art::LockWord::kGCStateShift)
+ASM_DEFINE(LOCK_WORD_GC_STATE_SIZE,
+ art::LockWord::kGCStateSize)
+ASM_DEFINE(LOCK_WORD_MARK_BIT_MASK_SHIFTED,
+ art::LockWord::kMarkBitStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_MARK_BIT_SHIFT,
+ art::LockWord::kMarkBitStateShift)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK,
+ art::LockWord::kReadBarrierStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED,
+ art::LockWord::kReadBarrierStateMaskShiftedToggled)
+ASM_DEFINE(LOCK_WORD_READ_BARRIER_STATE_SHIFT,
+ art::LockWord::kReadBarrierStateShift)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS,
+ art::LockWord::kStateForwardingAddress)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW,
+ art::LockWord::kStateForwardingAddressOverflow)
+ASM_DEFINE(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT,
+ art::LockWord::kForwardingAddressShift)
+ASM_DEFINE(LOCK_WORD_STATE_MASK_SHIFTED,
+ art::LockWord::kStateMaskShifted)
+ASM_DEFINE(LOCK_WORD_STATE_SHIFT,
+ art::LockWord::kStateShift)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED,
+ art::LockWord::kThinLockCountMaskShifted)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_ONE,
+ art::LockWord::kThinLockCountOne)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_SHIFT,
+ art::LockWord::kThinLockCountShift)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_COUNT_SIZE,
+ art::LockWord::kThinLockCountSize)
+ASM_DEFINE(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED,
+ art::LockWord::kThinLockOwnerMaskShifted)
diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc
deleted file mode 100644
index 7c515be..0000000
--- a/tools/cpp-define-generator/main.cc
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <ios>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <type_traits>
-
-// Art Offset file dependencies
-#define DEFINE_INCLUDE_DEPENDENCIES
-#include "offsets_all.def"
-
-std::string to_upper(std::string input) {
- std::transform(input.begin(), input.end(), input.begin(), ::toupper);
- return input;
-}
-
-template <typename T, typename = void>
-typename std::enable_if<!std::is_signed<T>::value, std::string>::type
-pretty_format(T value) {
- // Print most values as hex.
- std::stringstream ss;
- ss << std::showbase << std::hex << value;
- return ss.str();
-}
-
-template <typename T, typename = void>
-typename std::enable_if<std::is_signed<T>::value, std::string>::type
-pretty_format(T value) {
- // Print "signed" values as decimal so that the negativity doesn't get lost.
- std::stringstream ss;
-
- // For negative values add a (). Omit it from positive values for conciseness.
- if (value < 0) {
- ss << "(";
- }
-
- ss << value;
-
- if (value < 0) {
- ss << ")";
- }
- return ss.str();
-}
-
-template <typename T>
-void cpp_define(const std::string& name, T value) {
- std::cout << "#define " << name << " " << pretty_format(value) << std::endl;
-}
-
-template <typename T>
-void emit_check_eq(T value, const std::string& expr) {
- std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl;
-}
-
-const char *kFileHeader = /* // NOLINT [readability/multiline_string] [5] */ R"L1C3NS3(
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-#define ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-
-// This file has been auto-generated by cpp-define-generator; do not edit directly.
-)L1C3NS3"; // NOLINT [readability/multiline_string] [5]
-
-const char *kFileFooter = /* // NOLINT [readability/multiline_string] [5] */ R"F00T3R(
-#endif // ART_RUNTIME_GENERATED_ASM_SUPPORT_GEN_H_
-)F00T3R"; // NOLINT [readability/multiline_string] [5]
-
-#define MACROIZE(holder_type, field_name) to_upper(#holder_type "_" #field_name "_OFFSET")
-
-int main() {
- std::cout << kFileHeader << std::endl;
-
- std::string z = "";
-
- // Print every constant expression to stdout as a #define or a CHECK_EQ
-#define DEFINE_EXPR(macro_name, field_type, expr) \
- cpp_define(to_upper(#macro_name), static_cast<field_type>(expr)); \
- emit_check_eq(z + "static_cast<" #field_type ">(" + to_upper(#macro_name) + ")", \
- "static_cast<" #field_type ">(" #expr ")");
-#include "offsets_all.def"
-
- std::cout << kFileFooter << std::endl;
- return 0;
-}
diff --git a/tools/cpp-define-generator/make_header.py b/tools/cpp-define-generator/make_header.py
new file mode 100755
index 0000000..1b13923
--- /dev/null
+++ b/tools/cpp-define-generator/make_header.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script looks through a compiled object file (stored as human-readable text),
+# and looks for the compile-time constants (added through a custom "asm" block).
+# For example: .ascii ">>OBJECT_ALIGNMENT_MASK $7 $0<<"
+#
+# It will transform each such line into a #define which is usable in assembly code.
+# For example: #define OBJECT_ALIGNMENT_MASK 0x7
+#
+# Usage: make_header.py out/soong/.intermediates/.../asm_defines.o
+#
+
+import argparse
+import re
+import sys
+
+def convert(input):
+ """Find all defines in the compiler generated assembly and convert them to #define pragmas"""
+
+ asm_define_re = re.compile(r'">>(\w+) (?:\$|#)([-0-9]+) (?:\$|#)(0|1)<<"')
+ asm_defines = asm_define_re.findall(input)
+ if not asm_defines:
+ raise RuntimeError("Failed to find any asm defines in the input")
+
+ # Convert the found constants to #define pragmas.
+  # In case the C++ compiler decides to reorder the AsmDefineHelperFor_<name> functions,
+ # we don't want the order of the .h file to change from one compilation to another.
+ # Sorting ensures deterministic order of the #defines.
+ output = []
+ for name, value, negative_value in sorted(asm_defines):
+ value = int(value)
+ if value < 0 and negative_value == "0":
+ # Overflow - uint64_t constant was pretty printed as negative value.
+ value += 2 ** 64 # Python will use arbitrary precision arithmetic.
+ output.append("#define {0} {1:#x}".format(name, value))
+ return "\n".join(output)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('input', help="Object file as text")
+ args = parser.parse_args()
+ print(convert(open(args.input, "r").read()))
diff --git a/tools/cpp-define-generator/make_header_test.py b/tools/cpp-define-generator/make_header_test.py
new file mode 100755
index 0000000..a484285
--- /dev/null
+++ b/tools/cpp-define-generator/make_header_test.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import make_header
+
+test_input = r'''
+// Check that the various other assembly lines are ignored.
+.globl _Z49AsmDefineHelperFor_MIRROR_OBJECT_LOCK_WORD_OFFSETv
+.type _Z49AsmDefineHelperFor_MIRROR_OBJECT_LOCK_WORD_OFFSETv,%function
+.ascii ">>MIRROR_OBJECT_LOCK_WORD_OFFSET #4 #0<<"
+bx lr
+
+// Check large positive 32-bit constant.
+.ascii ">>OBJECT_ALIGNMENT_MASK_TOGGLED #4294967288 #0<<"
+
+// Check large positive 64-bit constant (it overflows into negative value).
+.ascii ">>OBJECT_ALIGNMENT_MASK_TOGGLED64 #-8 #0<<"
+
+// Check negative constant.
+.ascii ">>JIT_CHECK_OSR #-1 #1<<"
+'''
+
+test_output = r'''
+#define JIT_CHECK_OSR -0x1
+#define MIRROR_OBJECT_LOCK_WORD_OFFSET 0x4
+#define OBJECT_ALIGNMENT_MASK_TOGGLED 0xfffffff8
+#define OBJECT_ALIGNMENT_MASK_TOGGLED64 0xfffffffffffffff8
+'''
+
+class CppDefineGeneratorTest(unittest.TestCase):
+ def test_convert(self):
+ self.assertEqual(test_output.strip(), make_header.convert(test_input))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/cpp-define-generator/mirror_array.def b/tools/cpp-define-generator/mirror_array.def
new file mode 100644
index 0000000..f600b41
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_array.def
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/array.h"
+#endif
+
+ASM_DEFINE(MIRROR_ARRAY_LENGTH_OFFSET,
+ art::mirror::Array::LengthOffset().Int32Value())
+ASM_DEFINE(MIRROR_BOOLEAN_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint8_t)).Int32Value())
+ASM_DEFINE(MIRROR_BYTE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int8_t)).Int32Value())
+ASM_DEFINE(MIRROR_CHAR_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value())
+ASM_DEFINE(MIRROR_INT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value())
+ASM_DEFINE(MIRROR_LONG_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
+ASM_DEFINE(MIRROR_OBJECT_ARRAY_COMPONENT_SIZE,
+ sizeof(art::mirror::HeapReference<art::mirror::Object>))
+ASM_DEFINE(MIRROR_OBJECT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(art::mirror::HeapReference<art::mirror::Object>)).Int32Value())
+ASM_DEFINE(MIRROR_SHORT_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(int16_t)).Int32Value())
+ASM_DEFINE(MIRROR_WIDE_ARRAY_DATA_OFFSET,
+ art::mirror::Array::DataOffset(sizeof(uint64_t)).Int32Value())
diff --git a/tools/cpp-define-generator/mirror_class.def b/tools/cpp-define-generator/mirror_class.def
new file mode 100644
index 0000000..c15ae92
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_class.def
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/class.h"
+#endif
+
+ASM_DEFINE(MIRROR_CLASS_ACCESS_FLAGS_OFFSET,
+ art::mirror::Class::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_COMPONENT_TYPE_OFFSET,
+ art::mirror::Class::ComponentTypeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_DEX_CACHE_OFFSET,
+ art::mirror::Class::DexCacheOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IF_TABLE_OFFSET,
+ art::mirror::Class::IfTableOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
+ art::mirror::Class::PrimitiveTypeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
+ art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
+ art::mirror::Class::ObjectSizeOffset().Int32Value())
+ASM_DEFINE(MIRROR_CLASS_STATUS_OFFSET,
+ art::mirror::Class::StatusOffset().Int32Value())
+ASM_DEFINE(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
+ art::mirror::Class::kPrimitiveTypeSizeShiftShift)
diff --git a/tools/cpp-define-generator/mirror_dex_cache.def b/tools/cpp-define-generator/mirror_dex_cache.def
new file mode 100644
index 0000000..5272e86
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_dex_cache.def
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/dex_cache.h"
+#endif
+
+ASM_DEFINE(METHOD_DEX_CACHE_SIZE_MINUS_ONE,
+ art::mirror::DexCache::kDexCacheMethodCacheSize - 1)
+ASM_DEFINE(MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET,
+ art::mirror::DexCache::ResolvedMethodsOffset().Int32Value())
+ASM_DEFINE(STRING_DEX_CACHE_ELEMENT_SIZE,
+ sizeof(art::mirror::StringDexCachePair))
+ASM_DEFINE(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT,
+ art::WhichPowerOf2(sizeof(art::mirror::StringDexCachePair)))
+ASM_DEFINE(STRING_DEX_CACHE_HASH_BITS,
+ art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
+ASM_DEFINE(STRING_DEX_CACHE_SIZE_MINUS_ONE,
+ art::mirror::DexCache::kDexCacheStringCacheSize - 1)
+ASM_DEFINE(METHOD_DEX_CACHE_HASH_BITS,
+ art::LeastSignificantBit(art::mirror::DexCache::kDexCacheMethodCacheSize))
diff --git a/tools/cpp-define-generator/mirror_object.def b/tools/cpp-define-generator/mirror_object.def
new file mode 100644
index 0000000..facb037
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_object.def
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/object.h"
+#endif
+
+ASM_DEFINE(MIRROR_OBJECT_CLASS_OFFSET,
+ art::mirror::Object::ClassOffset().Int32Value())
+ASM_DEFINE(MIRROR_OBJECT_HEADER_SIZE,
+ sizeof(art::mirror::Object))
+ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET,
+ art::mirror::Object::MonitorOffset().Int32Value())
diff --git a/tools/cpp-define-generator/mirror_string.def b/tools/cpp-define-generator/mirror_string.def
new file mode 100644
index 0000000..3632b96
--- /dev/null
+++ b/tools/cpp-define-generator/mirror_string.def
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "mirror/string.h"
+#endif
+
+ASM_DEFINE(MIRROR_STRING_COUNT_OFFSET,
+ art::mirror::String::CountOffset().Int32Value())
+ASM_DEFINE(MIRROR_STRING_VALUE_OFFSET,
+ art::mirror::String::ValueOffset().Int32Value())
+ASM_DEFINE(STRING_COMPRESSION_FEATURE,
+ art::mirror::kUseStringCompression)
diff --git a/tools/cpp-define-generator/offset_art_method.def b/tools/cpp-define-generator/offset_art_method.def
deleted file mode 100644
index e6a0907..0000000
--- a/tools/cpp-define-generator/offset_art_method.def
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within art::ArtMethod.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "art_method.h" // art::ArtMethod
-#include "base/enums.h" // PointerSize
-#include "mirror/dex_cache.h" // art::DexCache
-#endif
-
-#define DEFINE_ART_METHOD_OFFSET_SIZED(field_name, method_name) \
- DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_32, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k32).Int32Value()) \
- DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET_64, int32_t, art::ArtMethod::method_name##Offset(art::PointerSize::k64).Int32Value())
-
-#define DEFINE_ART_METHOD_OFFSET(field_name, method_name) \
- DEFINE_EXPR(ART_METHOD_ ## field_name ## _OFFSET, int32_t, art::ArtMethod::method_name##Offset().Int32Value())
-
-#define DEFINE_DECLARING_CLASS_OFFSET(field_name, method_name) \
- DEFINE_EXPR(DECLARING_CLASS_ ## field_name ## _OFFSET, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_ART_METHOD_OFFSET_SIZED(JNI, EntryPointFromJni)
-DEFINE_ART_METHOD_OFFSET_SIZED(QUICK_CODE, EntryPointFromQuickCompiledCode)
-DEFINE_ART_METHOD_OFFSET(DECLARING_CLASS, DeclaringClass)
-DEFINE_ART_METHOD_OFFSET(ACCESS_FLAGS, AccessFlags)
-
-#undef DEFINE_ART_METHOD_OFFSET
-#undef DEFINE_ART_METHOD_OFFSET_32
-#undef DEFINE_DECLARING_CLASS_OFFSET
diff --git a/tools/cpp-define-generator/offset_mirror_class.def b/tools/cpp-define-generator/offset_mirror_class.def
deleted file mode 100644
index 9b7bfce..0000000
--- a/tools/cpp-define-generator/offset_mirror_class.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.Class (mirror::Class).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/class.h" // art::mirror::Object
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_CLASS_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(MIRROR_CLASS, field_name, int32_t, art::mirror::Class::method_name##Offset().Int32Value())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_MIRROR_CLASS_OFFSET(DEX_CACHE, DexCache)
-
-#undef DEFINE_MIRROR_CLASS_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_mirror_dex_cache.def b/tools/cpp-define-generator/offset_mirror_dex_cache.def
deleted file mode 100644
index 8f008bb..0000000
--- a/tools/cpp-define-generator/offset_mirror_dex_cache.def
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.DexCache (mirror::DexCache).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/class.h" // art::mirror::Object
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_DEX_CACHE_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(MIRROR_DEX_CACHE, field_name, int32_t, art::mirror::DexCache::method_name##Offset().Int32Value())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_MIRROR_DEX_CACHE_OFFSET(RESOLVED_METHODS, ResolvedMethods)
-
-#undef DEFINE_MIRROR_CLASS_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_mirror_object.def b/tools/cpp-define-generator/offset_mirror_object.def
deleted file mode 100644
index 9b99634..0000000
--- a/tools/cpp-define-generator/offset_mirror_object.def
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within java.lang.Object (mirror::Object).
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "mirror/object.h" // art::mirror::Object
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-#define DEFINE_MIRROR_OBJECT_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(MIRROR_OBJECT, field_name, int32_t, art::mirror::Object::method_name##Offset().Int32Value())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_MIRROR_OBJECT_OFFSET(CLASS, Class)
-DEFINE_MIRROR_OBJECT_OFFSET(LOCK_WORD, Monitor)
-
-#undef DEFINE_MIRROR_OBJECT_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_runtime.def b/tools/cpp-define-generator/offset_runtime.def
deleted file mode 100644
index 1d5ce7d..0000000
--- a/tools/cpp-define-generator/offset_runtime.def
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/callee_save_type.h" // art::CalleeSaveType
-#include "runtime.h" // art::Runtime
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-// Note: these callee save methods loads require read barriers.
-
-#define DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(field_name, constant_name) \
- DEFINE_OFFSET_EXPR(Runtime, \
- field_name ## _METHOD, \
- size_t, \
- art::Runtime::GetCalleeSaveMethodOffset(constant_name))
-
- // Macro substring Constant name
-// Offset of field Runtime::callee_save_methods_[kSaveAllCalleeSaves]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_ALL_CALLEE_SAVES, art::CalleeSaveType::kSaveAllCalleeSaves)
-// Offset of field Runtime::callee_save_methods_[kSaveRefsOnly]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_ONLY, art::CalleeSaveType::kSaveRefsOnly)
-// Offset of field Runtime::callee_save_methods_[kSaveRefsAndArgs]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_REFS_AND_ARGS, art::CalleeSaveType::kSaveRefsAndArgs)
-// Offset of field Runtime::callee_save_methods_[kSaveEverything]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING, art::CalleeSaveType::kSaveEverything)
-// Offset of field Runtime::callee_save_methods_[kSaveEverythingForClinit]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING_FOR_CLINIT, art::CalleeSaveType::kSaveEverythingForClinit)
-// Offset of field Runtime::callee_save_methods_[kSaveEverythingForSuspendCheck]
-DEFINE_RUNTIME_CALLEE_SAVE_OFFSET(SAVE_EVERYTHING_FOR_SUSPEND_CHECK, art::CalleeSaveType::kSaveEverythingForSuspendCheck)
-
-#undef DEFINE_RUNTIME_CALLEE_SAVE_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_shadow_frame.def b/tools/cpp-define-generator/offset_shadow_frame.def
deleted file mode 100644
index b49a340..0000000
--- a/tools/cpp-define-generator/offset_shadow_frame.def
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "stack.h" // art::ShadowFrame
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-#define DEFINE_SHADOW_FRAME_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(ShadowFrame, field_name, int32_t, art::ShadowFrame::method_name##Offset())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_SHADOW_FRAME_OFFSET(LINK, Link)
-DEFINE_SHADOW_FRAME_OFFSET(METHOD, Method)
-DEFINE_SHADOW_FRAME_OFFSET(RESULT_REGISTER, ResultRegister)
-DEFINE_SHADOW_FRAME_OFFSET(DEX_PC_PTR, DexPCPtr)
-DEFINE_SHADOW_FRAME_OFFSET(CODE_ITEM, CodeItem)
-DEFINE_SHADOW_FRAME_OFFSET(LOCK_COUNT_DATA, LockCountData)
-DEFINE_SHADOW_FRAME_OFFSET(NUMBER_OF_VREGS, NumberOfVRegs)
-DEFINE_SHADOW_FRAME_OFFSET(DEX_PC, DexPC)
-DEFINE_SHADOW_FRAME_OFFSET(CACHED_HOTNESS_COUNTDOWN, CachedHotnessCountdown)
-DEFINE_SHADOW_FRAME_OFFSET(VREGS, VRegs)
-
-#undef DEFINE_SHADOW_FRAME_OFFSET
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offset_thread.def b/tools/cpp-define-generator/offset_thread.def
deleted file mode 100644
index 6f94d38..0000000
--- a/tools/cpp-define-generator/offset_thread.def
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Offsets within ShadowFrame.
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "base/enums.h" // PointerSize
-#include "stack.h" // art::ShadowFrame
-#endif
-
-#include "common.def" // DEFINE_OFFSET_EXPR
-
-#define DEFINE_THREAD_OFFSET(field_name, method_name) \
- DEFINE_OFFSET_EXPR(Thread, field_name, int32_t, art::Thread::method_name##Offset<art::kRuntimePointerSize>().Int32Value())
-
-// New macro suffix Method Name (of the Offset method)
-DEFINE_THREAD_OFFSET(FLAGS, ThreadFlags)
-DEFINE_THREAD_OFFSET(ID, ThinLockId)
-DEFINE_THREAD_OFFSET(IS_GC_MARKING, IsGcMarking)
-DEFINE_THREAD_OFFSET(CARD_TABLE, CardTable)
-
-// TODO: The rest of the offsets
-// are dependent on __SIZEOF_POINTER__
-
-#undef DEFINE_THREAD_OFFSET
-
-#include "common_undef.def" // undef DEFINE_OFFSET_EXPR
diff --git a/tools/cpp-define-generator/offsets_all.def b/tools/cpp-define-generator/offsets_all.def
deleted file mode 100644
index 31587d8..0000000
--- a/tools/cpp-define-generator/offsets_all.def
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Includes every single offset file in art.
-// Useful for processing every single offset together.
-
-// Usage:
-// #define DEFINE_INCLUDE_DEPENDENCIES
-// #include "offsets_all.def"
-// to automatically include each def file's header dependencies.
-//
-// Afterwards,
-// #define DEFINE_EXPR(define_name, field_type, expr) ...
-// #include "offsets_all.def"
-// to process each offset however one wants.
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#define DEFINE_EXPR(define_name, field_type, expr)
-#endif
-
-#if !defined(DEFINE_EXPR)
-#error "Either DEFINE_INCLUDE_DEPENDENCIES or DEFINE_EXPR must be defined"
-#endif
-
-#include "constant_reference.def"
-#include "offset_runtime.def"
-// TODO: rest of THREAD_ offsets (depends on __SIZEOF__POINTER__).
-#include "offset_thread.def"
-// TODO: SHADOW_FRAME depends on __SIZEOF__POINTER__
-// #include "offset_shadow_frame.def"
-// TODO: MIRROR_OBJECT_HEADER_SIZE (depends on #ifdef read barrier)
-#include "offset_mirror_class.def"
-#include "offset_mirror_dex_cache.def"
-#include "offset_mirror_object.def"
-#include "constant_class.def"
-// TODO: MIRROR_*_ARRAY offsets (depends on header size)
-// TODO: MIRROR_STRING offsets (depends on header size)
-#include "offset_art_method.def"
-#include "constant_dexcache.def"
-#include "constant_card_table.def"
-#include "constant_heap.def"
-#include "constant_lockword.def"
-#include "constant_globals.def"
-#include "constant_rosalloc.def"
-#include "constant_thread.def"
-#include "constant_jit.def"
-
-// TODO: MIRROR_OBJECT_HEADER_SIZE #ifdef depends on read barriers
-// TODO: Array offsets (depends on MIRROR_OBJECT_HEADER_SIZE)
-
-#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#undef DEFINE_EXPR
-#undef DEFINE_INCLUDE_DEPENDENCIES
-#endif
-
-
diff --git a/tools/cpp-define-generator/presubmit-check-files-up-to-date b/tools/cpp-define-generator/presubmit-check-files-up-to-date
deleted file mode 100755
index 0301a3e..0000000
--- a/tools/cpp-define-generator/presubmit-check-files-up-to-date
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ---------------------------------------------------------------------------
-
-# Generates asm_support_gen.h into a temporary location.
-# Then verifies it is the same as our local stored copy.
-
-GEN_TOOL=cpp-define-generator-data
-
-if ! which "$GEN_TOOL"; then
- if [[ -z $ANDROID_BUILD_TOP ]]; then
- echo "ERROR: Can't find '$GEN_TOOL' in \$PATH. Perhaps try 'source build/envsetup.sh' ?" >&2
- else
- echo "ERROR: Can't find '$GEN_TOOL' in \$PATH. Perhaps try 'make $GEN_TOOL' ?" >&2
- fi
- exit 1
-fi
-
-#######################
-#######################
-
-PREUPLOAD_COMMIT_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
-BUILD_COPY="$(mktemp ${TMPDIR:-/tmp}/tmp.XXXXXX)"
-
-function finish() {
- # Delete temp files.
- [[ -f "$PREUPLOAD_COMMIT_COPY" ]] && rm "$PREUPLOAD_COMMIT_COPY"
- [[ -f "$BUILD_COPY" ]] && rm "$BUILD_COPY"
-}
-trap finish EXIT
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-ART_DIR="$( cd "$DIR/../.." && pwd )"
-ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
-
-# Repo upload hook runs inside of the top-level git directory.
-# If we run this script manually, be in the right place for git.
-cd "$ART_DIR"
-
-if [[ -z $PREUPLOAD_COMMIT ]]; then
- echo "WARNING: Not running as a pre-upload hook. Assuming commit to check = 'HEAD'"
- PREUPLOAD_COMMIT=HEAD
-fi
-
-# Get version we are about to push into git.
-git show "$PREUPLOAD_COMMIT:$ASM_SUPPORT_GEN_CHECKED_IN_COPY" > "$PREUPLOAD_COMMIT_COPY" || exit 1
-# Get version that our build would have made.
-"$GEN_TOOL" > "$BUILD_COPY" || exit 1
-
-if ! diff "$PREUPLOAD_COMMIT_COPY" "$BUILD_COPY"; then
- echo "asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
- echo " has diverged from the build copy." >&2
- echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
- exit 1
-fi
-
-# Success. Print nothing to avoid spamming users.
diff --git a/tools/cpp-define-generator/rosalloc.def b/tools/cpp-define-generator/rosalloc.def
new file mode 100644
index 0000000..eb8d8f2
--- /dev/null
+++ b/tools/cpp-define-generator/rosalloc.def
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "gc/allocator/rosalloc.h"
+#endif
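+
+// Assumed usage (mirroring the old DEFINE_EXPR scheme in the removed .def files): the
+// including code either defines ASM_DEFINE_INCLUDE_DEPENDENCIES to pull in the headers
+// needed by these expressions, or defines ASM_DEFINE(name, expr) to process each constant.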
+
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
+ art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1)
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32,
+ ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64,
+ ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSize - 1))
+ASM_DEFINE(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
+ art::gc::allocator::RosAlloc::kThreadLocalBracketQuantumSizeShift)
+ASM_DEFINE(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
+ art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize)
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
+ art::gc::allocator::RosAlloc::RunFreeListHeadOffset())
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_OFFSET,
+ art::gc::allocator::RosAlloc::RunFreeListOffset())
+ASM_DEFINE(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
+ art::gc::allocator::RosAlloc::RunFreeListSizeOffset())
+ASM_DEFINE(ROSALLOC_SLOT_NEXT_OFFSET,
+ art::gc::allocator::RosAlloc::RunSlotNextOffset())
diff --git a/tools/cpp-define-generator/runtime.def b/tools/cpp-define-generator/runtime.def
new file mode 100644
index 0000000..2a2e303
--- /dev/null
+++ b/tools/cpp-define-generator/runtime.def
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "runtime.h"
+#endif
+
+ASM_DEFINE(RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveAllCalleeSaves))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForClinit))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverythingForSuspendCheck))
+ASM_DEFINE(RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveEverything))
+ASM_DEFINE(RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsAndArgs))
+ASM_DEFINE(RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET,
+ art::Runtime::GetCalleeSaveMethodOffset(art::CalleeSaveType::kSaveRefsOnly))
diff --git a/tools/cpp-define-generator/shadow_frame.def b/tools/cpp-define-generator/shadow_frame.def
new file mode 100644
index 0000000..10a309c
--- /dev/null
+++ b/tools/cpp-define-generator/shadow_frame.def
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "interpreter/shadow_frame.h"
+#endif
+
+ASM_DEFINE(SHADOWFRAME_CACHED_HOTNESS_COUNTDOWN_OFFSET,
+ art::ShadowFrame::CachedHotnessCountdownOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET,
+ art::ShadowFrame::DexInstructionsOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_PC_OFFSET,
+ art::ShadowFrame::DexPCOffset())
+ASM_DEFINE(SHADOWFRAME_DEX_PC_PTR_OFFSET,
+ art::ShadowFrame::DexPCPtrOffset())
+ASM_DEFINE(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET,
+ art::ShadowFrame::HotnessCountdownOffset())
+ASM_DEFINE(SHADOWFRAME_LINK_OFFSET,
+ art::ShadowFrame::LinkOffset())
+ASM_DEFINE(SHADOWFRAME_LOCK_COUNT_DATA_OFFSET,
+ art::ShadowFrame::LockCountDataOffset())
+ASM_DEFINE(SHADOWFRAME_METHOD_OFFSET,
+ art::ShadowFrame::MethodOffset())
+ASM_DEFINE(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET,
+ art::ShadowFrame::NumberOfVRegsOffset())
+ASM_DEFINE(SHADOWFRAME_RESULT_REGISTER_OFFSET,
+ art::ShadowFrame::ResultRegisterOffset())
+ASM_DEFINE(SHADOWFRAME_VREGS_OFFSET,
+ art::ShadowFrame::VRegsOffset())
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
new file mode 100644
index 0000000..7b19076
--- /dev/null
+++ b/tools/cpp-define-generator/thread.def
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "thread.h"
+#endif
+
+ASM_DEFINE(THREAD_CARD_TABLE_OFFSET,
+ art::Thread::CardTableOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_CHECKPOINT_REQUEST,
+ art::kCheckpointRequest)
+ASM_DEFINE(THREAD_CURRENT_IBASE_OFFSET,
+ art::Thread::MterpCurrentIBaseOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_EMPTY_CHECKPOINT_REQUEST,
+ art::kEmptyCheckpointRequest)
+ASM_DEFINE(THREAD_EXCEPTION_OFFSET,
+ art::Thread::ExceptionOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_FLAGS_OFFSET,
+ art::Thread::ThreadFlagsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ID_OFFSET,
+ art::Thread::ThinLockIdOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_OFFSET,
+ art::Thread::InterpreterCacheOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_LOG2,
+ art::Thread::InterpreterCacheSizeLog2())
+ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
+ art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
+ art::Thread::ThreadLocalAllocStackEndOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
+ art::Thread::ThreadLocalAllocStackTopOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_END_OFFSET,
+ art::Thread::ThreadLocalEndOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_OBJECTS_OFFSET,
+ art::Thread::ThreadLocalObjectsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_LOCAL_POS_OFFSET,
+ art::Thread::ThreadLocalPosOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ROSALLOC_RUNS_OFFSET,
+ art::Thread::RosAllocRunsOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_SELF_OFFSET,
+ art::Thread::SelfOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST,
+ art::kSuspendRequest | art::kCheckpointRequest | art::kEmptyCheckpointRequest)
+ASM_DEFINE(THREAD_SUSPEND_REQUEST,
+ art::kSuspendRequest)
+ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
+ art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
diff --git a/tools/cpp-define-generator/verify-asm-support b/tools/cpp-define-generator/verify-asm-support
deleted file mode 100755
index 745b115..0000000
--- a/tools/cpp-define-generator/verify-asm-support
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ---------------------------------------------------------------------------
-
-# Generates asm_support_gen.h into the $OUT directory in the build.
-# Then verifies that it is the same as in runtime/generated/asm_support_gen.h
-
-# Validates that art/runtime/generated/asm_support_gen.h
-# - This must be run after a build since it uses cpp-define-generator-data
-
-# Path to asm_support_gen.h that we check into our git repository.
-ASM_SUPPORT_GEN_CHECKED_IN_COPY="runtime/generated/asm_support_gen.h"
-# Instead of producing an error if checked-in copy differs from the generated version,
-# overwrite the local checked-in copy instead.
-OVERWRITE_CHECKED_IN_COPY_IF_CHANGED="n"
-
-#######################
-#######################
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-ART_DIR="$( cd "$DIR/../.." && pwd )"
-ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY="$ART_DIR/runtime/generated/asm_support_gen.h"
-
-# Sanity check that we haven't moved the file around.
-# If we did, perhaps the above constant should be updated.
-if ! [[ -f "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" ]]; then
- echo "ERROR: Missing asm_support_gen.h, expected to be in '$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY'" >&2
- exit 1
-fi
-
-# The absolute path to cpp-define-generator is in $1
-# Generate the file as part of the build into the out location specified by $2.
-
-# Compare that the generated file matches our golden copy that's checked into git.
-# If not, it is a fatal error and the user needs to run 'generate-asm-support' to rebuild.
-
-if [[ $# -lt 2 ]]; then
- echo "Usage: $0 [--quiet] [--presubmit] <path-to-cpp-define-generator-data-binary> <output-file>'" >&2
- exit 1
-fi
-
-# Supress 'chatty' messages during the build.
-# If anything is printed in a success case then
-# the main Android build can't reuse the same line for
-# showing multiple commands being executed.
-QUIET=false
-if [[ "$1" == "--quiet" ]]; then
- QUIET=true
- shift
-fi
-
-CPP_DEFINE_GENERATOR_TOOL="$1"
-OUTPUT_FILE="$2"
-
-function pecho() {
- if ! $QUIET; then
- echo "$@"
- fi
-}
-
-# Generate the header. Print the command we're running to console for readability.
-pecho "cpp-define-generator-data > \"$OUTPUT_FILE\""
-"$CPP_DEFINE_GENERATOR_TOOL" > "$OUTPUT_FILE"
-retval="$?"
-
-if [[ $retval -ne 0 ]]; then
- echo "verify-asm-support: FATAL: Error while running cpp-define-generator-data" >&2
- exit $retval
-fi
-
-if ! diff "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY" "$OUTPUT_FILE"; then
-
- if [[ $OVERWRITE_CHECKED_IN_COPY_IF_CHANGED == "y" ]]; then
- cp "$OUTPUT_FILE" "$ABS_ASM_SUPPORT_GEN_CHECKED_IN_COPY"
- echo "verify-asm-support: OK: Overwrote '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' with build copy."
- echo " Please 'git add $ASM_SUPPORT_GEN_CHECKED_IN_COPY'."
- else
- echo "---------------------------------------------------------------------------------------------" >&2
- echo "verify-asm-support: ERROR: Checked-in copy of '$ASM_SUPPORT_GEN_CHECKED_IN_COPY' " >&2
- echo " has diverged from the build copy." >&2
- echo " Please re-run the 'generate-asm-support' command to resync the header." >&2
- [[ -f "$OUTPUT_FILE" ]] && rm "$OUTPUT_FILE"
- exit 1
- fi
-fi
-
-pecho "verify-asm-support: SUCCESS. Built '$OUTPUT_FILE' which matches our checked in copy."
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index 659a940..88db672 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -118,7 +118,7 @@
ProcessCodeItem(*dex_file,
method.GetInstructionsAndData(),
accessor.GetClassIdx(),
- /*count_types*/ true,
+ /*count_types=*/ true,
types);
}
}
@@ -143,7 +143,7 @@
ProcessCodeItem(*dex_file,
data,
accessor.GetClassIdx(),
- /*count_types*/ false,
+ /*count_types=*/ false,
types);
std::vector<uint8_t> buffer = std::move(buffer_);
buffer_.clear();
diff --git a/tools/dexanalyze/dexanalyze_strings.cc b/tools/dexanalyze/dexanalyze_strings.cc
index 863e4ee..dcadb59 100644
--- a/tools/dexanalyze/dexanalyze_strings.cc
+++ b/tools/dexanalyze/dexanalyze_strings.cc
@@ -21,6 +21,7 @@
#include <iostream>
#include <queue>
+#include "base/time_utils.h"
#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_instruction-inl.h"
@@ -34,10 +35,156 @@
static const size_t kPrefixConstantCost = 4;
static const size_t kPrefixIndexCost = 2;
+class PrefixDictionary {
+ public:
+ // Add prefix data and return the offset to the start of the added data.
+ size_t AddPrefixData(const uint8_t* data, size_t len) {
+ const size_t offset = prefix_data_.size();
+ prefix_data_.insert(prefix_data_.end(), data, data + len);
+ return offset;
+ }
+
+ static constexpr size_t kLengthBits = 8;
+ static constexpr size_t kLengthMask = (1u << kLengthBits) - 1;
+
+ // Return the prefix offset and length.
+ ALWAYS_INLINE void GetOffset(uint32_t prefix_index, uint32_t* offset, uint32_t* length) const {
+ CHECK_LT(prefix_index, offsets_.size());
+ const uint32_t data = offsets_[prefix_index];
+ *length = data & kLengthMask;
+ *offset = data >> kLengthBits;
+ }
+
+ uint32_t AddOffset(uint32_t offset, uint32_t length) {
+ CHECK_LE(length, kLengthMask);
+ offsets_.push_back((offset << kLengthBits) | length);
+ return offsets_.size() - 1;
+ }
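+ // Illustrative example (not used by the tool): with kLengthBits == 8,
+ // AddOffset(0x1234, 5) stores (0x1234 << 8) | 5 == 0x123405, and GetOffset()
+ // then recovers offset 0x1234 and length 5 for that index.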
+
+ public:
+ std::vector<uint32_t> offsets_;
+ std::vector<uint8_t> prefix_data_;
+};
+
+class PrefixStrings {
+ public:
+ class Builder {
+ public:
+ explicit Builder(PrefixStrings* output) : output_(output) {}
+ void Build(const std::vector<std::string>& strings);
+
+ private:
+ PrefixStrings* const output_;
+ };
+
+ // Return the string index that was added.
+ size_t AddString(uint16_t prefix, const std::string& str) {
+ const size_t string_offset = chars_.size();
+ chars_.push_back(static_cast<uint8_t>(prefix >> 8));
+ chars_.push_back(static_cast<uint8_t>(prefix >> 0));
+ EncodeUnsignedLeb128(&chars_, str.length());
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&str[0]);
+ chars_.insert(chars_.end(), ptr, ptr + str.length());
+ string_offsets_.push_back(string_offset);
+ return string_offsets_.size() - 1;
+ }
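+ // Resulting layout per string (as written above): a 2-byte big-endian prefix
+ // index, a ULEB128 suffix length, then the raw suffix bytes.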
+
+ std::string GetString(uint32_t string_idx) const {
+ const size_t offset = string_offsets_[string_idx];
+ const uint8_t* suffix_data = &chars_[offset];
+ uint16_t prefix_idx = (static_cast<uint16_t>(suffix_data[0]) << 8) +
+ suffix_data[1];
+ suffix_data += 2;
+ uint32_t prefix_offset;
+ uint32_t prefix_len;
+ dictionary_.GetOffset(prefix_idx, &prefix_offset, &prefix_len);
+ const uint8_t* prefix_data = &dictionary_.prefix_data_[prefix_offset];
+ std::string ret(prefix_data, prefix_data + prefix_len);
+ uint32_t suffix_len = DecodeUnsignedLeb128(&suffix_data);
+ ret.insert(ret.end(), suffix_data, suffix_data + suffix_len);
+ return ret;
+ }
+
+ ALWAYS_INLINE bool Equal(uint32_t string_idx, const uint8_t* data, size_t len) const {
+ const size_t offset = string_offsets_[string_idx];
+ const uint8_t* suffix_data = &chars_[offset];
+ uint16_t prefix_idx = (static_cast<uint16_t>(suffix_data[0]) << 8) +
+ suffix_data[1];
+ suffix_data += 2;
+ uint32_t prefix_offset;
+ uint32_t prefix_len;
+ dictionary_.GetOffset(prefix_idx, &prefix_offset, &prefix_len);
+ uint32_t suffix_len = DecodeUnsignedLeb128(&suffix_data);
+ if (prefix_len + suffix_len != len) {
+ return false;
+ }
+ const uint8_t* prefix_data = &dictionary_.prefix_data_[prefix_offset];
+ if ((true)) {
+ return memcmp(prefix_data, data, prefix_len) == 0u &&
+ memcmp(suffix_data, data + prefix_len, len - prefix_len) == 0u;
+ } else {
+ len -= prefix_len;
+ while (prefix_len != 0u) {
+ if (*prefix_data++ != *data++) {
+ return false;
+ }
+ --prefix_len;
+ }
+ while (len != 0u) {
+ if (*suffix_data++ != *data++) {
+ return false;
+ }
+ --len;
+ }
+ return true;
+ }
+ }
+
+ public:
+ PrefixDictionary dictionary_;
+ std::vector<uint8_t> chars_;
+ std::vector<uint32_t> string_offsets_;
+};
+
+// Normal (non-prefix) strings, stored as a ULEB128 length followed by the raw character data.
+class NormalStrings {
+ public:
+ // Return the string index that was added.
+ size_t AddString(const std::string& str) {
+ const size_t string_offset = chars_.size();
+ EncodeUnsignedLeb128(&chars_, str.length());
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&str[0]);
+ chars_.insert(chars_.end(), ptr, ptr + str.length());
+ string_offsets_.push_back(string_offset);
+ return string_offsets_.size() - 1;
+ }
+
+ std::string GetString(uint32_t string_idx) const {
+ const size_t offset = string_offsets_[string_idx];
+ const uint8_t* data = &chars_[offset];
+ uint32_t len = DecodeUnsignedLeb128(&data);
+ return std::string(data, data + len);
+ }
+
+ ALWAYS_INLINE bool Equal(uint32_t string_idx, const uint8_t* data, size_t len) const {
+ const size_t offset = string_offsets_[string_idx];
+ const uint8_t* str_data = &chars_[offset];
+ uint32_t str_len = DecodeUnsignedLeb128(&str_data);
+ if (str_len != len) {
+ return false;
+ }
+ return memcmp(data, str_data, len) == 0u;
+ }
+
+ public:
+ std::vector<uint8_t> chars_;
+ std::vector<uint32_t> string_offsets_;
+};
+
// Node value = (distance from root) * (occurrences - 1).
class MatchTrie {
public:
- void Add(const std::string& str) {
+ MatchTrie* Add(const std::string& str) {
MatchTrie* node = this;
size_t depth = 0u;
for (uint8_t c : str) {
@@ -54,33 +201,28 @@
}
++node->count_;
}
- node->is_end_ = true;
+ return node;
}
// Returns the length of the longest prefix and if it's a leaf node.
- std::pair<size_t, bool> LongestPrefix(const std::string& str) const {
- const MatchTrie* node = this;
- const MatchTrie* best_node = this;
- size_t depth = 0u;
- size_t best_depth = 0u;
+ MatchTrie* LongestPrefix(const std::string& str) {
+ MatchTrie* node = this;
for (uint8_t c : str) {
if (node->nodes_[c] == nullptr) {
break;
}
node = node->nodes_[c].get();
- ++depth;
- if (node->is_end_) {
- best_depth = depth;
- best_node = node;
- }
}
- bool is_leaf = true;
- for (const std::unique_ptr<MatchTrie>& cur_node : best_node->nodes_) {
+ return node;
+ }
+
+ bool IsLeaf() const {
+ for (const std::unique_ptr<MatchTrie>& cur_node : nodes_) {
if (cur_node != nullptr) {
- is_leaf = false;
+ return false;
}
}
- return {best_depth, is_leaf};
+ return true;
}
int32_t Savings() const {
@@ -134,7 +276,7 @@
++num_childs;
}
}
- if (num_childs > 1u || elem->is_end_) {
+ if (num_childs > 1u || elem->value_ != 0u) {
queue.emplace(elem->Savings(), elem);
}
}
@@ -166,30 +308,117 @@
if (pair.first <= 0) {
continue;
}
- std::vector<uint8_t> chars;
- for (MatchTrie* cur = pair.second; cur != this; cur = cur->parent_) {
- chars.push_back(cur->incoming_);
- }
- ret.push_back(std::string(chars.rbegin(), chars.rend()));
- // LOG(INFO) << pair.second->Savings() << " : " << ret.back();
+ ret.push_back(pair.second->GetString());
}
return ret;
}
+ std::string GetString() const {
+ std::vector<uint8_t> chars;
+ for (const MatchTrie* cur = this; cur->parent_ != nullptr; cur = cur->parent_) {
+ chars.push_back(cur->incoming_);
+ }
+ return std::string(chars.rbegin(), chars.rend());
+ }
+
std::unique_ptr<MatchTrie> nodes_[256];
MatchTrie* parent_ = nullptr;
uint32_t count_ = 0u;
- int32_t depth_ = 0u;
+ uint32_t depth_ = 0u;
int32_t savings_ = 0u;
uint8_t incoming_ = 0u;
- // If the current node is the end of a possible prefix.
- bool is_end_ = false;
+ // Value of the current node, non zero if the node is chosen.
+ uint32_t value_ = 0u;
// If the current node is chosen to be a used prefix.
bool chosen_ = false;
// If the current node is a prefix of a longer chosen prefix.
uint32_t chosen_suffix_count_ = 0u;
};
+void PrefixStrings::Builder::Build(const std::vector<std::string>& strings) {
+ std::unique_ptr<MatchTrie> prefixe_trie(new MatchTrie());
+ for (size_t i = 0; i < strings.size(); ++i) {
+ size_t len = 0u;
+ if (i > 0u) {
+ CHECK_GT(strings[i], strings[i - 1]);
+ len = std::max(len, PrefixLen(strings[i], strings[i - 1]));
+ }
+ if (i < strings.size() - 1) {
+ len = std::max(len, PrefixLen(strings[i], strings[i + 1]));
+ }
+ len = std::min(len, kMaxPrefixLen);
+ if (len >= kMinPrefixLen) {
+ prefixe_trie->Add(strings[i].substr(0, len))->value_ = 1u;
+ }
+ }
+
+ // Build prefixes.
+ {
+ static constexpr size_t kPrefixBits = 15;
+ std::vector<std::string> prefixes(prefixe_trie->ExtractPrefixes(1 << kPrefixBits));
+ // Add longest prefixes first so that subprefixes can share data.
+ std::sort(prefixes.begin(), prefixes.end(), [](const std::string& a, const std::string& b) {
+ return a.length() > b.length();
+ });
+ prefixe_trie.reset();
+ prefixe_trie.reset(new MatchTrie());
+ uint32_t prefix_idx = 0u;
+ CHECK_EQ(output_->dictionary_.AddOffset(0u, 0u), prefix_idx++);
+ for (const std::string& str : prefixes) {
+ uint32_t prefix_offset = 0u;
+ MatchTrie* node = prefixe_trie->LongestPrefix(str);
+ if (node != nullptr && node->depth_ == str.length() && node->value_ != 0u) {
+ CHECK_EQ(node->GetString(), str);
+ uint32_t existing_len = 0u;
+ output_->dictionary_.GetOffset(node->value_, &prefix_offset, &existing_len);
+ // Make sure to register the current node.
+ prefixe_trie->Add(str)->value_ = prefix_idx;
+ } else {
+ auto add_str = [&](const std::string& s) {
+ node = prefixe_trie->Add(s);
+ node->value_ = prefix_idx;
+ while (node != nullptr) {
+ node->value_ = prefix_idx;
+ node = node->parent_;
+ }
+ };
+ static constexpr size_t kNumSubstrings = 1u;
+ // Increasing kNumSubstrings provides savings since it lets common substrings,
+ // not only prefixes, share data. The downside is that it is slow.
+ for (size_t i = 0; i < std::min(str.length(), kNumSubstrings); ++i) {
+ add_str(str.substr(i));
+ }
+ prefix_offset = output_->dictionary_.AddPrefixData(
+ reinterpret_cast<const uint8_t*>(&str[0]),
+ str.length());
+ }
+ // TODO: Validate the prefix offset.
+ CHECK_EQ(output_->dictionary_.AddOffset(prefix_offset, str.length()), prefix_idx);
+ ++prefix_idx;
+ }
+ }
+
+ // Add strings to the dictionary.
+ for (const std::string& str : strings) {
+ MatchTrie* node = prefixe_trie->LongestPrefix(str);
+ uint32_t prefix_idx = 0u;
+ uint32_t best_length = 0u;
+ while (node != nullptr) {
+ uint32_t offset = 0u;
+ uint32_t length = 0u;
+ output_->dictionary_.GetOffset(node->value_, &offset, &length);
+ if (node->depth_ == length) {
+ prefix_idx = node->value_;
+ best_length = node->depth_;
+ break;
+ // Actually the prefix we want.
+ }
+ node = node->parent_;
+ }
+ output_->AddString(prefix_idx, str.substr(best_length));
+ }
+}
+
void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
std::set<std::string> unique_strings;
// Accumulate the strings.
@@ -212,18 +441,13 @@
unique_strings.insert(data);
}
}
- // Unique strings only since we want to exclude savings from multidex duplication.
- ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()), 1);
+ // Unique strings only since we want to exclude savings from multi-dex duplication.
+ ProcessStrings(std::vector<std::string>(unique_strings.begin(), unique_strings.end()));
}
-void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings, size_t iterations) {
- if (iterations == 0u) {
- return;
- }
+void AnalyzeStrings::ProcessStrings(const std::vector<std::string>& strings) {
// Calculate total shared prefix.
- std::vector<size_t> shared_len;
- prefixes_.clear();
- std::unique_ptr<MatchTrie> prefix_construct(new MatchTrie());
+ size_t prefix_index_cost_ = 0u;
for (size_t i = 0; i < strings.size(); ++i) {
size_t best_len = 0;
if (i > 0) {
@@ -233,131 +457,117 @@
best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
}
best_len = std::min(best_len, kMaxPrefixLen);
- std::string prefix;
if (best_len >= kMinPrefixLen) {
- prefix = strings[i].substr(0, best_len);
- prefix_construct->Add(prefix);
- ++prefixes_[prefix];
total_shared_prefix_bytes_ += best_len;
}
- total_prefix_index_cost_ += kPrefixIndexCost;
- }
-
- static constexpr size_t kPrefixBits = 15;
- static constexpr size_t kShortLen = (1u << (15 - kPrefixBits)) - 1;
- std::unique_ptr<MatchTrie> prefix_trie(new MatchTrie());
- static constexpr bool kUseGreedyTrie = true;
- if (kUseGreedyTrie) {
- std::vector<std::string> prefixes(prefix_construct->ExtractPrefixes(1 << kPrefixBits));
- for (auto&& str : prefixes) {
- prefix_trie->Add(str);
- }
- } else {
- // Optimize the result by moving long prefixes to shorter ones if it causes additional savings.
- while (true) {
- bool have_savings = false;
- auto it = prefixes_.begin();
- std::vector<std::string> longest;
- for (const auto& pair : prefixes_) {
- longest.push_back(pair.first);
- }
- std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
- return a.length() > b.length();
- });
- // Do longest first since this provides the best results.
- for (const std::string& s : longest) {
- it = prefixes_.find(s);
- CHECK(it != prefixes_.end());
- const std::string& prefix = it->first;
- int64_t best_savings = 0u;
- int64_t best_len = -1;
- for (int64_t len = prefix.length() - 1; len >= 0; --len) {
- auto found = prefixes_.find(prefix.substr(0, len));
- if (len != 0 && found == prefixes_.end()) {
- continue;
- }
- // Calculate savings from downgrading the prefix.
- int64_t savings = kPrefixConstantCost + prefix.length() -
- (prefix.length() - len) * it->second;
- if (savings > best_savings) {
- best_savings = savings;
- best_len = len;
- break;
- }
- }
- if (best_len != -1) {
- prefixes_[prefix.substr(0, best_len)] += it->second;
- it = prefixes_.erase(it);
- optimization_savings_ += best_savings;
- have_savings = true;
- } else {
- ++it;
- }
- }
- if (!have_savings) {
- break;
- }
- }
- for (auto&& pair : prefixes_) {
- prefix_trie->Add(pair.first);
- }
- }
-
- // Count longest prefixes.
- std::set<std::string> used_prefixes;
- std::vector<std::string> suffix;
- for (const std::string& str : strings) {
- auto pair = prefix_trie->LongestPrefix(str);
- const size_t len = pair.first;
- if (len >= kMinPrefixLen) {
- ++strings_used_prefixed_;
- total_prefix_savings_ += len;
- used_prefixes.insert(str.substr(0, len));
- }
- suffix.push_back(str.substr(len));
- if (suffix.back().size() < kShortLen) {
+ prefix_index_cost_ += kPrefixIndexCost;
+ if (strings[i].length() < 64) {
++short_strings_;
} else {
++long_strings_;
}
}
- std::sort(suffix.begin(), suffix.end());
- for (const std::string& prefix : used_prefixes) {
- // 4 bytes for an offset, one for length.
- auto pair = prefix_trie->LongestPrefix(prefix);
- CHECK_EQ(pair.first, prefix.length());
- if (pair.second) {
- // Only need to add to dictionary if it's a leaf, otherwise we can reuse string data of the
- // other prefix.
- total_prefix_dict_ += prefix.size();
- }
- total_prefix_table_ += kPrefixConstantCost;
+ total_prefix_index_cost_ += prefix_index_cost_;
+
+ PrefixStrings prefix_strings;
+ {
+ PrefixStrings::Builder prefix_builder(&prefix_strings);
+ prefix_builder.Build(strings);
}
- ProcessStrings(suffix, iterations - 1);
+ Benchmark(prefix_strings, strings, &prefix_timings_);
+ const size_t num_prefixes = prefix_strings.dictionary_.offsets_.size();
+ total_num_prefixes_ += num_prefixes;
+ total_prefix_table_ += num_prefixes * sizeof(prefix_strings.dictionary_.offsets_[0]);
+ total_prefix_dict_ += prefix_strings.dictionary_.prefix_data_.size();
+
+ {
+ NormalStrings normal_strings;
+ for (const std::string& s : strings) {
+ normal_strings.AddString(s);
+ }
+ const uint64_t unique_string_data_bytes = normal_strings.chars_.size();
+ total_unique_string_data_bytes_ += unique_string_data_bytes;
+ total_prefix_savings_ += unique_string_data_bytes - prefix_strings.chars_.size() +
+ prefix_index_cost_;
+ Benchmark(normal_strings, strings, &normal_timings_);
+ }
+}
+
+template <typename Strings>
+void AnalyzeStrings::Benchmark(const Strings& strings,
+ const std::vector<std::string>& reference,
+ StringTimings* timings) {
+ const size_t kIterations = 100;
+ timings->num_comparisons_ += reference.size() * kIterations;
+
+ uint64_t start = NanoTime();
+ for (size_t j = 0; j < kIterations; ++j) {
+ for (size_t i = 0; i < reference.size(); ++i) {
+ CHECK(strings.Equal(
+ i,
+ reinterpret_cast<const uint8_t*>(&reference[i][0]),
+ reference[i].length()))
+ << i << ": " << strings.GetString(i) << " vs " << reference[i];
+ }
+ }
+ timings->time_equal_comparisons_ += NanoTime() - start;
+
+ start = NanoTime();
+ for (size_t j = 0; j < kIterations; ++j) {
+ size_t count = 0u;
+ for (size_t i = 0; i < reference.size(); ++i) {
+ count += strings.Equal(
+ reference.size() - 1 - i,
+ reinterpret_cast<const uint8_t*>(&reference[i][0]),
+ reference[i].length());
+ }
+ CHECK_LT(count, 2u);
+ }
+ timings->time_non_equal_comparisons_ += NanoTime() - start;
+}
+
+template void AnalyzeStrings::Benchmark(const PrefixStrings&,
+ const std::vector<std::string>&,
+ StringTimings* timings);
+template void AnalyzeStrings::Benchmark(const NormalStrings&,
+ const std::vector<std::string>&,
+ StringTimings* timings);
+
+void StringTimings::Dump(std::ostream& os) const {
+ const double comparisons = static_cast<double>(num_comparisons_);
+ os << "Compare equal " << static_cast<double>(time_equal_comparisons_) / comparisons << "\n";
+ os << "Compare not equal " << static_cast<double>(time_non_equal_comparisons_) / comparisons << "\n";
}
void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
os << "Total string data bytes " << Percent(string_data_bytes_, total_size) << "\n";
+ os << "Total unique string data bytes "
+ << Percent(total_unique_string_data_bytes_, total_size) << "\n";
os << "UTF-16 string data bytes " << Percent(wide_string_bytes_, total_size) << "\n";
os << "ASCII string data bytes " << Percent(ascii_string_bytes_, total_size) << "\n";
+ os << "Prefix string timings\n";
+ prefix_timings_.Dump(os);
+ os << "Normal string timings\n";
+ normal_timings_.Dump(os);
+
// Prefix based strings.
os << "Total shared prefix bytes " << Percent(total_shared_prefix_bytes_, total_size) << "\n";
os << "Prefix dictionary cost " << Percent(total_prefix_dict_, total_size) << "\n";
os << "Prefix table cost " << Percent(total_prefix_table_, total_size) << "\n";
os << "Prefix index cost " << Percent(total_prefix_index_cost_, total_size) << "\n";
- int64_t net_savings = total_prefix_savings_ + short_strings_;
+ int64_t net_savings = total_prefix_savings_;
net_savings -= total_prefix_dict_;
net_savings -= total_prefix_table_;
net_savings -= total_prefix_index_cost_;
os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
- os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
+ os << "Prefix base savings " << Percent(total_prefix_savings_, total_size) << "\n";
os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
os << "Strings using prefix "
<< Percent(strings_used_prefixed_, total_prefix_index_cost_ / kPrefixIndexCost) << "\n";
os << "Short strings " << Percent(short_strings_, short_strings_ + long_strings_) << "\n";
if (verbose_level_ >= VerboseLevel::kEverything) {
- std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
+ std::vector<std::pair<std::string, size_t>> pairs; // (prefixes_.begin(), prefixes_.end());
// Sort lexicographically.
std::sort(pairs.begin(), pairs.end());
for (const auto& pair : pairs) {
diff --git a/tools/dexanalyze/dexanalyze_strings.h b/tools/dexanalyze/dexanalyze_strings.h
index 32702a6..88ea467 100644
--- a/tools/dexanalyze/dexanalyze_strings.h
+++ b/tools/dexanalyze/dexanalyze_strings.h
@@ -18,9 +18,10 @@
#define ART_TOOLS_DEXANALYZE_DEXANALYZE_STRINGS_H_
#include <array>
-#include <vector>
#include <map>
+#include <vector>
+#include "base/leb128.h"
#include "base/safe_map.h"
#include "dexanalyze_experiments.h"
#include "dex/code_item_accessors.h"
@@ -29,6 +30,15 @@
namespace art {
namespace dexanalyze {
+class StringTimings {
+ public:
+ void Dump(std::ostream& os) const;
+
+ uint64_t time_equal_comparisons_ = 0u;
+ uint64_t time_non_equal_comparisons_ = 0u;
+ uint64_t num_comparisons_ = 0u;
+};
+
// Analyze string data and strings accessed from code.
class AnalyzeStrings : public Experiment {
public:
@@ -36,22 +46,26 @@
void Dump(std::ostream& os, uint64_t total_size) const override;
private:
- void ProcessStrings(const std::vector<std::string>& strings, size_t iterations);
+ void ProcessStrings(const std::vector<std::string>& strings);
+ template <typename Strings> void Benchmark(const Strings& strings,
+ const std::vector<std::string>& reference,
+ StringTimings* timings);
+ StringTimings prefix_timings_;
+ StringTimings normal_timings_;
int64_t wide_string_bytes_ = 0u;
int64_t ascii_string_bytes_ = 0u;
int64_t string_data_bytes_ = 0u;
+ int64_t total_unique_string_data_bytes_ = 0u;
int64_t total_shared_prefix_bytes_ = 0u;
int64_t total_prefix_savings_ = 0u;
int64_t total_prefix_dict_ = 0u;
int64_t total_prefix_table_ = 0u;
int64_t total_prefix_index_cost_ = 0u;
int64_t total_num_prefixes_ = 0u;
- int64_t optimization_savings_ = 0u;
int64_t strings_used_prefixed_ = 0u;
int64_t short_strings_ = 0u;
int64_t long_strings_ = 0u;
- std::unordered_map<std::string, size_t> prefixes_;
};
} // namespace dexanalyze
diff --git a/tools/dexanalyze/dexanalyze_test.cc b/tools/dexanalyze/dexanalyze_test.cc
index 96be3f9..c6648c0 100644
--- a/tools/dexanalyze/dexanalyze_test.cc
+++ b/tools/dexanalyze/dexanalyze_test.cc
@@ -37,23 +37,23 @@
};
TEST_F(DexAnalyzeTest, NoInputFileGiven) {
- DexAnalyzeExec({ "-a" }, /*expect_success*/ false);
+ DexAnalyzeExec({ "-a" }, /*expect_success=*/ false);
}
TEST_F(DexAnalyzeTest, CantOpenInput) {
- DexAnalyzeExec({ "-a", "/non/existent/path" }, /*expect_success*/ false);
+ DexAnalyzeExec({ "-a", "/non/existent/path" }, /*expect_success=*/ false);
}
TEST_F(DexAnalyzeTest, TestAnalyzeMultidex) {
- DexAnalyzeExec({ "-a", GetTestDexFileName("MultiDex") }, /*expect_success*/ true);
+ DexAnalyzeExec({ "-a", GetTestDexFileName("MultiDex") }, /*expect_success=*/ true);
}
TEST_F(DexAnalyzeTest, TestAnalizeCoreDex) {
- DexAnalyzeExec({ "-a", GetLibCoreDexFileNames()[0] }, /*expect_success*/ true);
+ DexAnalyzeExec({ "-a", GetLibCoreDexFileNames()[0] }, /*expect_success=*/ true);
}
TEST_F(DexAnalyzeTest, TestInvalidArg) {
- DexAnalyzeExec({ "-invalid-option" }, /*expect_success*/ false);
+ DexAnalyzeExec({ "-invalid-option" }, /*expect_success=*/ false);
}
} // namespace art
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index 9b6ff98..38e5a99 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -10,47 +10,47 @@
description: "Test fails due to unexpectedly getting the thread-groups of zombie threads",
result: EXEC_FAILED,
bug: 66906414,
- name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
+ name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference_ThreadGroup002Test#testThreadGroup002"
},
{
description: "Test fails due to static values not being set correctly.",
result: EXEC_FAILED,
bug: 66905894,
- name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
+ name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues006Test#testGetValues006"
},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",
result: EXEC_FAILED,
bug: 66906734,
- names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference.SetValues003Test#testSetValues003_InvalidIndex",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod002",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod003",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.NewInstanceTest#testNewInstance002",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.SetValues002Test#testSetValues002",
- "org.apache.harmony.jpda.tests.jdwp.Events.ClassPrepare002Test#testClassPrepareCausedByDebugger",
- "org.apache.harmony.jpda.tests.jdwp.Events.ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.DisableCollectionTest#testDisableCollection_null",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_invalid",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_null",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.GetValues002Test#testGetValues002",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValues003Test#testSetValues003",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValuesTest#testSetValues001",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.FieldsWithGenericTest#testFieldsWithGeneric001",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues002Test#testGetValues002",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues004Test#testGetValues004",
- "org.apache.harmony.jpda.tests.jdwp.StringReference.ValueTest#testStringReferenceValueTest001_NullString",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ChildrenTest#testChildren_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference_SetValues003Test#testSetValues003_InvalidIndex",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod003",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_NewInstanceTest#testNewInstance002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_SetValues002Test#testSetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.Events_ClassPrepare002Test#testClassPrepareCausedByDebugger",
+ "org.apache.harmony.jpda.tests.jdwp.Events_ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_DisableCollectionTest#testDisableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_invalid",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValues003Test#testSetValues003",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValuesTest#testSetValues001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_FieldsWithGenericTest#testFieldsWithGeneric001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues004Test#testGetValues004",
+ "org.apache.harmony.jpda.tests.jdwp.StringReference_ValueTest#testStringReferenceValueTest001_NullString",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ChildrenTest#testChildren_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_NameTest#testName001_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ParentTest#testParent_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_CapabilitiesNewTest#testCapabilitiesNew001" ]
},
{
description: "Test times out on fugu-debug",
result: EXEC_FAILED,
bug: 70459916,
- names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug",
- "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest002#testVMDebug" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest#testVMDebug",
+ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest002#testVMDebug" ]
}
]
diff --git a/tools/field-null-percent/Android.bp b/tools/field-null-percent/Android.bp
new file mode 100644
index 0000000..26bb1dc
--- /dev/null
+++ b/tools/field-null-percent/Android.bp
@@ -0,0 +1,56 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+ name: "fieldnull-defaults",
+ host_supported: true,
+ srcs: ["fieldnull.cc"],
+ defaults: ["art_defaults"],
+
+ // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+ // to be same ISA as what it is attached to.
+ compile_multilib: "both",
+
+ shared_libs: [
+ "libbase",
+ ],
+ header_libs: [
+ "libopenjdkjvmti_headers",
+ ],
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+ symlink_preferred_arch: true,
+}
+
+art_cc_library {
+ name: "libfieldnull",
+ defaults: ["fieldnull-defaults"],
+}
+
+art_cc_library {
+ name: "libfieldnulld",
+ defaults: [
+ "art_debug_defaults",
+ "fieldnull-defaults",
+ ],
+}
diff --git a/tools/field-null-percent/README.md b/tools/field-null-percent/README.md
new file mode 100644
index 0000000..d8bc65d
--- /dev/null
+++ b/tools/field-null-percent/README.md
@@ -0,0 +1,51 @@
+# fieldnull
+
+fieldnull is a JVMTI agent that counts, for a given field, how many instances
+have that field set to null. This can be useful for determining which fields
+should be moved into side structures in cases where memory use is important.
+
+# Usage
+### Build
+> `make libfieldnull`
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples
+below assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+> `Lname/of/class;.nameOfField:Ltype/of/field;[,...]`
+
+#### ART
+> `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libfieldnull.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+> `adb shell setenforce 0`
+>
+> `adb push $ANDROID_PRODUCT_OUT/system/lib64/libfieldnull.so /data/local/tmp/`
+>
+> `adb shell am start-activity --attach-agent '/data/local/tmp/libfieldnull.so=Ljava/lang/Class;.name:Ljava/lang/String;' some.debuggable.apps/.the.app.MainActivity`
+
+#### RI
+> `java '-agentpath:libfieldnull.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/helloworld/classes helloworld`
+
+### Printing the Results
+All statistics gathered during the trace are printed automatically when the
+program exits normally. Android applications are always killed rather than
+allowed to exit, so the results need to be printed manually.
+
+> `kill -SIGQUIT $(pid com.littleinc.orm_benchmark)`
+
+Will initiate a dump of the counts (to logcat).
+
+The dump will look something like this.
+
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:96] Dumping counts of null fields.`
+>
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:97] Field name null count total count`
+>
+> `dalvikvm32 I 08-30 14:51:20 84818 84818 fieldnull.cc:135] Ljava/lang/Class;.name:Ljava/lang/String; 5 2936`
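The dump format above is shown only by example. As an illustration (not part of this change), here is a minimal C++ sketch of splitting one such tab-separated line into its parts, assuming the logcat prefix has already been stripped; the `DumpLine` struct and `ParseDumpLine` helper are hypothetical names.

```cpp
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Hypothetical holder for one dump line: "Lclass;.field:Ltype;<TAB>null<TAB>total".
struct DumpLine {
  std::string field_descriptor;
  uint64_t null_count = 0;
  uint64_t total_count = 0;
};

// Splits a single tab-separated dump line; returns false if the line does not
// have the expected three columns.
bool ParseDumpLine(const std::string& line, DumpLine* out) {
  std::istringstream is(line);
  if (!std::getline(is, out->field_descriptor, '\t')) return false;
  return static_cast<bool>(is >> out->null_count >> out->total_count);
}

int main() {
  DumpLine parsed;
  if (ParseDumpLine("Ljava/lang/Class;.name:Ljava/lang/String;\t5\t2936", &parsed)) {
    std::cout << parsed.field_descriptor << ": "
              << parsed.null_count << " null of " << parsed.total_count << "\n";
  }
  return 0;
}
```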
diff --git a/tools/field-null-percent/check-null-fields.py b/tools/field-null-percent/check-null-fields.py
new file mode 100755
index 0000000..c11d51a
--- /dev/null
+++ b/tools/field-null-percent/check-null-fields.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Retrieves counts of how many objects have a particular field set to null, across all running processes.
+
+Prints a json map from pid -> (log-tag, field-name, null-count, total-count).
+"""
+
+
+import adb
+import argparse
+import concurrent.futures
+import itertools
+import json
+import logging
+import os
+import os.path
+import signal
+import subprocess
+import time
+
+def main():
+ parser = argparse.ArgumentParser(description="Get counts of null fields from a device.")
+ parser.add_argument("-S", "--serial", metavar="SERIAL", type=str,
+ required=False,
+ default=os.environ.get("ANDROID_SERIAL", None),
+ help="Android serial to use. Defaults to ANDROID_SERIAL")
+ parser.add_argument("-p", "--pid", required=False,
+ default=[], action="append",
+ help="Specific pids to check. By default checks all running dalvik processes")
+ has_out = "OUT" in os.environ
+ def_32 = os.path.join(os.environ.get("OUT", ""), "system", "lib", "libfieldnull.so")
+ def_64 = os.path.join(os.environ.get("OUT", ""), "system", "lib64", "libfieldnull.so")
+ has_32 = has_out and os.path.exists(def_32)
+ has_64 = has_out and os.path.exists(def_64)
+ def pushable_lib(name):
+ if os.path.isfile(name):
+ return name
+ else:
+ raise argparse.ArgumentTypeError(name + " is not a file!")
+ parser.add_argument('--lib32', type=pushable_lib,
+ required=not has_32,
+ action='store',
+ default=def_32,
+ help="Location of 32 bit agent to push")
+ parser.add_argument('--lib64', type=pushable_lib,
+ required=not has_64,
+ action='store',
+ default=def_64 if has_64 else None,
+ help="Location of 64 bit agent to push")
+ parser.add_argument("fields", nargs="+",
+ help="fields to check")
+
+ out = parser.parse_args()
+
+ device = adb.device.get_device(out.serial)
+ print("getting root")
+ device.root()
+
+ print("Disabling selinux")
+ device.shell("setenforce 0".split())
+
+ print("Pushing libraries")
+ lib32 = device.shell("mktemp".split())[0].strip()
+ lib64 = device.shell("mktemp".split())[0].strip()
+
+ print(out.lib32 + " -> " + lib32)
+ device.push(out.lib32, lib32)
+
+ print(out.lib64 + " -> " + lib64)
+ device.push(out.lib64, lib64)
+
+ cmd32 = "'{}={}'".format(lib32, ','.join(out.fields))
+ cmd64 = "'{}={}'".format(lib64, ','.join(out.fields))
+
+ if len(out.pid) == 0:
+ print("Getting jdwp pids")
+ new_env = dict(os.environ)
+ new_env["ANDROID_SERIAL"] = device.serial
+ p = subprocess.Popen([device.adb_path, "jdwp"], env=new_env, stdout=subprocess.PIPE)
+ # ADB jdwp doesn't ever exit so just kill it after 1 second to get a list of pids.
+ with concurrent.futures.ProcessPoolExecutor() as ppe:
+ ppe.submit(kill_it, p.pid).result()
+ out.pid = p.communicate()[0].strip().split()
+ p.wait()
+ print(out.pid)
+ print("Clearing logcat")
+ device.shell("logcat -c".split())
+ final = {}
+ print("Getting info from every process dumped to logcat")
+ for p in out.pid:
+ res = check_single_process(p, device, cmd32, cmd64)
+ if res is not None:
+ final[p] = res
+ device.shell('rm {}'.format(lib32).split())
+ device.shell('rm {}'.format(lib64).split())
+ print(json.dumps(final, indent=2))
+
+def kill_it(p):
+ time.sleep(1)
+ os.kill(p, signal.SIGINT)
+
+def check_single_process(pid, device, bit32, bit64):
+ try:
+ # Just try attaching both 32 and 64 bit. Wrong one will fail silently.
+ device.shell(['am', 'attach-agent', str(pid), bit32])
+ device.shell(['am', 'attach-agent', str(pid), bit64])
+ time.sleep(0.5)
+ device.shell('kill -3 {}'.format(pid).split())
+ time.sleep(0.5)
+ out = []
+ all_fields = []
+ lc_cmd = "logcat -d -b main --pid={} -e '^\\t.*\\t[0-9]*\\t[0-9]*$'".format(pid).split(' ')
+ for l in device.shell(lc_cmd)[0].strip().split('\n'):
+ # first 4 are just date and other useless data.
+ data = l.strip().split()[5:]
+ if len(data) < 4:
+ continue
+ # If we run multiple times, many copies of the agent will be attached. Just take the
+ # report from one copy for each field.
+ field = data[1]
+ if field not in all_fields:
+ out.append((str(data[0]), str(data[1]), int(data[2]), int(data[3])))
+ all_fields.append(field)
+ if len(out) != 0:
+ print("pid: " + pid + " -> " + str(out))
+ return out
+ else:
+ return None
+ except adb.device.ShellError as e:
+ print("failed on pid " + repr(pid) + " because " + repr(e))
+ return None
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/field-null-percent/fieldnull.cc b/tools/field-null-percent/fieldnull.cc
new file mode 100644
index 0000000..8f5b389
--- /dev/null
+++ b/tools/field-null-percent/fieldnull.cc
@@ -0,0 +1,218 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+
+#include <atomic>
+#include <iomanip>
+#include <iostream>
+#include <istream>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <sstream>
+#include <string.h>
+#include <string>
+#include <vector>
+
+namespace fieldnull {
+
+#define CHECK_JVMTI(x) CHECK_EQ((x), JVMTI_ERROR_NONE)
+
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+static JavaVM* java_vm = nullptr;
+
+// Field is "Lclass/name/here;.field_name:Lfield/type/here;"
+static std::pair<jclass, jfieldID> SplitField(JNIEnv* env, const std::string& field_id) {
+ CHECK_EQ(field_id[0], 'L');
+ env->PushLocalFrame(1);
+ std::istringstream is(field_id);
+ std::string class_name;
+ std::string field_name;
+ std::string field_type;
+
+ std::getline(is, class_name, '.');
+ std::getline(is, field_name, ':');
+ std::getline(is, field_type, '\0');
+
+ jclass klass = reinterpret_cast<jclass>(
+ env->NewGlobalRef(env->FindClass(class_name.substr(1, class_name.size() - 2).c_str())));
+ jfieldID field = env->GetFieldID(klass, field_name.c_str(), field_type.c_str());
+ CHECK(klass != nullptr);
+ CHECK(field != nullptr);
+ LOG(INFO) << "listing field " << field_id;
+ env->PopLocalFrame(nullptr);
+ return std::make_pair(klass, field);
+}
+
+static std::vector<std::pair<jclass, jfieldID>> GetRequestedFields(JNIEnv* env,
+ const std::string& args) {
+ std::vector<std::pair<jclass, jfieldID>> res;
+ std::stringstream args_stream(args);
+ std::string item;
+ while (std::getline(args_stream, item, ',')) {
+ if (item == "") {
+ continue;
+ }
+ res.push_back(SplitField(env, item));
+ }
+ return res;
+}
+
+static jint SetupJvmtiEnv(JavaVM* vm, jvmtiEnv** jvmti) {
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(jvmti), JVMTI_VERSION_1_1);
+
+ if (res != JNI_OK || *jvmti == nullptr) {
+ LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+ return vm->GetEnv(reinterpret_cast<void**>(jvmti), kArtTiVersion);
+ }
+ return res;
+}
+
+struct RequestList {
+ std::vector<std::pair<jclass, jfieldID>> fields_;
+};
+
+static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+ JNIEnv* env = nullptr;
+ CHECK_EQ(java_vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+ LOG(INFO) << "Dumping counts of null fields.";
+ LOG(INFO) << "\t" << "Field name"
+ << "\t" << "null count"
+ << "\t" << "total count";
+ RequestList* list;
+ CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+ for (std::pair<jclass, jfieldID>& p : list->fields_) {
+ jclass klass = p.first;
+ jfieldID field = p.second;
+ // Make sure all instances of the class are tagged with the klass ptr value. Since this is a
+ // global ref it's guaranteed to be unique.
+ CHECK_JVMTI(jvmti->IterateOverInstancesOfClass(
+ p.first,
+ // We need to do this to all objects every time since we might be looking for multiple
+ // fields in classes that are subtypes of each other.
+ JVMTI_HEAP_OBJECT_EITHER,
+ /* class_tag, size, tag_ptr, user_data*/
+ [](jlong, jlong, jlong* tag_ptr, void* klass) -> jvmtiIterationControl {
+ *tag_ptr = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+ return JVMTI_ITERATION_CONTINUE;
+ },
+ klass));
+ jobject* obj_list;
+ jint obj_len;
+ jlong tag = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+ CHECK_JVMTI(jvmti->GetObjectsWithTags(1, &tag, &obj_len, &obj_list, nullptr));
+
+ uint64_t null_cnt = 0;
+ for (jint i = 0; i < obj_len; i++) {
+ if (env->GetObjectField(obj_list[i], field) == nullptr) {
+ null_cnt++;
+ }
+ }
+
+ char* field_name;
+ char* field_sig;
+ char* class_name;
+ CHECK_JVMTI(jvmti->GetFieldName(klass, field, &field_name, &field_sig, nullptr));
+ CHECK_JVMTI(jvmti->GetClassSignature(klass, &class_name, nullptr));
+ LOG(INFO) << "\t" << class_name << "." << field_name << ":" << field_sig
+ << "\t" << null_cnt
+ << "\t" << obj_len;
+ CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_name)));
+ CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_sig)));
+ CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(class_name)));
+ }
+}
+
+static void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+ DataDumpRequestCb(jvmti);
+ RequestList* list = nullptr;
+ CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+ delete list;
+}
+
+static void CreateFieldList(jvmtiEnv* jvmti, JNIEnv* env, const std::string& args) {
+ RequestList* list = nullptr;
+ CHECK_JVMTI(jvmti->Allocate(sizeof(*list), reinterpret_cast<unsigned char**>(&list)));
+ new (list) RequestList { .fields_ = GetRequestedFields(env, args), };
+ CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(list));
+}
+
+static void VMInitCb(jvmtiEnv* jvmti, JNIEnv* env, jobject thr ATTRIBUTE_UNUSED) {
+ char* args = nullptr;
+ CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&args)));
+ CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(nullptr));
+ CreateFieldList(jvmti, env, args);
+ CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+ CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_DATA_DUMP_REQUEST,
+ nullptr));
+ CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(args)));
+}
+
+static jint AgentStart(JavaVM* vm, char* options, bool is_onload) {
+ android::base::InitLogging(/* argv= */nullptr);
+ java_vm = vm;
+ jvmtiEnv* jvmti = nullptr;
+ if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
+ LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+ return JNI_ERR;
+ }
+ jvmtiCapabilities caps { .can_tag_objects = 1, };
+ CHECK_JVMTI(jvmti->AddCapabilities(&caps));
+ jvmtiEventCallbacks cb {
+ .VMInit = VMInitCb,
+ .DataDumpRequest = DataDumpRequestCb,
+ .VMDeath = VMDeathCb,
+ };
+ CHECK_JVMTI(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+ if (is_onload) {
+ unsigned char* ptr = nullptr;
+ CHECK_JVMTI(jvmti->Allocate(strlen(options) + 1, &ptr));
+ strcpy(reinterpret_cast<char*>(ptr), options);
+ CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(ptr));
+ CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+ } else {
+ JNIEnv* env = nullptr;
+ CHECK_EQ(vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+ CreateFieldList(jvmti, env, options);
+ CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+ CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+ JVMTI_EVENT_DATA_DUMP_REQUEST,
+ nullptr));
+ }
+ return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ return AgentStart(vm, options, /*is_onload=*/false);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm,
+ char* options,
+ void* reserved ATTRIBUTE_UNUSED) {
+ return AgentStart(jvm, options, /*is_onload=*/true);
+}
+
+} // namespace fieldnull
+
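For readers skimming the patch, the core of `DataDumpRequestCb` above is a JVMTI tag-then-collect pattern. A reduced, hedged sketch follows (error checking omitted); `CountNullInstances` is a hypothetical helper and assumes a `jvmtiEnv` with `can_tag_objects`, a valid `JNIEnv`, and the global `jclass`/`jfieldID` produced by `SplitField`.

```cpp
#include <cstdint>

#include <jni.h>
#include <jvmti.h>

// Reduced sketch of the tag-then-collect pattern used in DataDumpRequestCb.
static uint64_t CountNullInstances(jvmtiEnv* jvmti, JNIEnv* env, jclass klass, jfieldID field) {
  // 1) Tag every live instance of `klass` with a value derived from the class pointer.
  jvmti->IterateOverInstancesOfClass(
      klass,
      JVMTI_HEAP_OBJECT_EITHER,
      [](jlong /*class_tag*/, jlong /*size*/, jlong* tag_ptr, void* user_data) -> jvmtiIterationControl {
        *tag_ptr = static_cast<jlong>(reinterpret_cast<intptr_t>(user_data));
        return JVMTI_ITERATION_CONTINUE;
      },
      klass);

  // 2) Collect every object carrying that tag.
  jlong tag = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
  jint count = 0;
  jobject* objects = nullptr;
  jvmti->GetObjectsWithTags(1, &tag, &count, &objects, /* tag_result_ptr= */ nullptr);

  // 3) Read the field on each instance and count the nulls.
  uint64_t nulls = 0;
  for (jint i = 0; i < count; i++) {
    if (env->GetObjectField(objects[i], field) == nullptr) {
      nulls++;
    }
  }
  jvmti->Deallocate(reinterpret_cast<unsigned char*>(objects));
  return nulls;
}
```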
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 6d9b6fb..68211a1 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -278,7 +278,7 @@
if (open_writable) {
for (const std::string& filename : dex_paths) {
- File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+ File fd(filename.c_str(), O_RDWR, /* check_usage= */ false);
CHECK_NE(fd.Fd(), -1) << "Unable to open file '" << filename << "': " << strerror(errno);
// Memory-map the dex file with MAP_SHARED flag so that changes in memory
@@ -288,10 +288,10 @@
// We do those checks here and skip them when loading the processed file
// into boot class path.
std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
- /* location */ filename,
- /* verify */ true,
- /* verify_checksum */ true,
- /* mmap_shared */ true,
+ /* location= */ filename,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
+ /* mmap_shared= */ true,
&error_msg));
CHECK(dex_file.get() != nullptr) << "Open failed for '" << filename << "' " << error_msg;
CHECK(dex_file->IsStandardDexFile()) << "Expected a standard dex file '" << filename << "'";
@@ -302,9 +302,9 @@
} else {
for (const std::string& filename : dex_paths) {
bool success = dex_loader.Open(filename.c_str(),
- /* location */ filename,
- /* verify */ true,
- /* verify_checksum */ true,
+ /* location= */ filename,
+ /* verify= */ true,
+ /* verify_checksum= */ true,
&error_msg,
&dex_files_);
CHECK(success) << "Open failed for '" << filename << "' " << error_msg;
@@ -640,7 +640,7 @@
OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
// Open all dex files.
- ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ true);
+ ClassPath boot_classpath(boot_dex_paths_, /* open_writable= */ true);
// Set access flags of all members.
boot_classpath.ForEachDexMember([&api_list](const DexMember& boot_member) {
@@ -688,7 +688,7 @@
std::set<std::string> unresolved;
// Open all dex files.
- ClassPath boot_classpath(boot_dex_paths_, /* open_writable */ false);
+ ClassPath boot_classpath(boot_dex_paths_, /* open_writable= */ false);
Hierarchy boot_hierarchy(boot_classpath);
// Mark all boot dex members private.
@@ -698,7 +698,7 @@
// Resolve each SDK dex member against the framework and mark it white.
for (const std::vector<std::string>& stub_classpath_dex : stub_classpaths_) {
- ClassPath stub_classpath(stub_classpath_dex, /* open_writable */ false);
+ ClassPath stub_classpath(stub_classpath_dex, /* open_writable= */ false);
Hierarchy stub_hierarchy(stub_classpath);
stub_classpath.ForEachDexMember(
[&stub_hierarchy, &boot_hierarchy, &boot_members, &unresolved](
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index b50f684..799546e 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -85,15 +85,15 @@
ArtDexFileLoader dex_loader;
std::string error_msg;
- File fd(file.GetFilename(), O_RDONLY, /* check_usage */ false);
+ File fd(file.GetFilename(), O_RDONLY, /* check_usage= */ false);
if (fd.Fd() == -1) {
LOG(FATAL) << "Unable to open file '" << file.GetFilename() << "': " << strerror(errno);
UNREACHABLE();
}
std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(
- fd.Release(), /* location */ file.GetFilename(), /* verify */ false,
- /* verify_checksum */ true, /* mmap_shared */ false, &error_msg));
+ fd.Release(), /* location= */ file.GetFilename(), /* verify= */ false,
+ /* verify_checksum= */ true, /* mmap_shared= */ false, &error_msg));
if (dex_file.get() == nullptr) {
LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << error_msg;
UNREACHABLE();
@@ -179,22 +179,31 @@
HiddenApiAccessFlags::ApiList GetIMethodHiddenFlags(const DexFile& dex_file) {
return GetMethodHiddenFlags(
- "imethod", 0, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ "imethod", 0, /* expected_native= */ false, FindClass("LMain;", dex_file), dex_file);
}
HiddenApiAccessFlags::ApiList GetSMethodHiddenFlags(const DexFile& dex_file) {
- return GetMethodHiddenFlags(
- "smethod", kAccPublic, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ return GetMethodHiddenFlags("smethod",
+ kAccPublic,
+ /* expected_native= */ false,
+ FindClass("LMain;", dex_file),
+ dex_file);
}
HiddenApiAccessFlags::ApiList GetINMethodHiddenFlags(const DexFile& dex_file) {
- return GetMethodHiddenFlags(
- "inmethod", kAccPublic, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ return GetMethodHiddenFlags("inmethod",
+ kAccPublic,
+ /* expected_native= */ true,
+ FindClass("LMain;", dex_file),
+ dex_file);
}
HiddenApiAccessFlags::ApiList GetSNMethodHiddenFlags(const DexFile& dex_file) {
- return GetMethodHiddenFlags(
- "snmethod", kAccProtected, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ return GetMethodHiddenFlags("snmethod",
+ kAccProtected,
+ /* expected_native= */ true,
+ FindClass("LMain;", dex_file),
+ dex_file);
}
};
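The hiddenapi hunks above only switch inline argument comments from `/* name */` to `/* name= */`, the form that argument-comment checkers such as clang-tidy's bugprone-argument-comment can verify against the parameter name. A small illustration, with a hypothetical `OpenDexFile` that merely mimics the call sites in this change:

```cpp
#include <cstdio>

// Illustrative only; OpenDexFile and its parameters are hypothetical.
static bool OpenDexFile(const char* path, bool verify, bool verify_checksum, bool mmap_shared) {
  std::printf("open %s verify=%d checksum=%d shared=%d\n", path, verify, verify_checksum, mmap_shared);
  return true;
}

int main() {
  // Old style: a bare parameter name, which tools cannot reliably check.
  OpenDexFile("classes.dex", /* verify */ true, /* verify_checksum */ true, /* mmap_shared */ false);
  // New style used throughout this change: the name followed by '='.
  OpenDexFile("classes.dex", /* verify= */ true, /* verify_checksum= */ true, /* mmap_shared= */ false);
  return 0;
}
```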
diff --git a/tools/jfuzz/jfuzz.cc b/tools/jfuzz/jfuzz.cc
index a97a99c..b8a646d 100644
--- a/tools/jfuzz/jfuzz.cc
+++ b/tools/jfuzz/jfuzz.cc
@@ -562,11 +562,11 @@
case 1:
if (emitArrayVariable(tp))
return;
- // FALL-THROUGH
+ [[fallthrough]];
case 2:
if (emitLocalVariable(tp))
return;
- // FALL-THROUGH
+ [[fallthrough]];
default:
emitFieldVariable(tp);
break;
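The jfuzz hunk above replaces `// FALL-THROUGH` comments with the standard C++17 `[[fallthrough]]` attribute, which documents intentional fall-through in a way the compiler can verify instead of relying on a comment. A hedged, self-contained example of the attribute (not taken from jfuzz):

```cpp
#include <iostream>

// Hypothetical example: [[fallthrough]] (C++17) marks the deliberate absence of
// a break so fall-through warnings are not emitted for this case.
static const char* Describe(int level) {
  switch (level) {
    case 0:
      return "none";
    case 1:
      std::cout << "level 1 also implies level 2 handling\n";
      [[fallthrough]];  // intentional: level 1 shares the code below
    case 2:
      return "some";
    default:
      return "all";
  }
}

int main() {
  std::cout << Describe(1) << "\n";  // prints the note, then "some"
  return 0;
}
```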
diff --git a/tools/jit-load/Android.bp b/tools/jit-load/Android.bp
new file mode 100644
index 0000000..a57a408
--- /dev/null
+++ b/tools/jit-load/Android.bp
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+ name: "jitload-defaults",
+ host_supported: true,
+ srcs: [
+ "jitload.cc",
+ ],
+ defaults: ["art_defaults"],
+
+ // Note that this tool needs to be built for both 32-bit and 64-bit, since it must be
+ // the same ISA as the process it is attached to.
+ compile_multilib: "both",
+
+ shared_libs: [
+ "libbase",
+ ],
+ target: {
+ android: {
+ },
+ host: {
+ },
+ },
+ header_libs: [
+ "libopenjdkjvmti_headers",
+ ],
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+ symlink_preferred_arch: true,
+}
+
+art_cc_library {
+ name: "libjitload",
+ defaults: ["jitload-defaults"],
+ shared_libs: [
+ "libart",
+ "libdexfile",
+ "libprofile",
+ "libartbase",
+ ],
+}
+
+art_cc_library {
+ name: "libjitloadd",
+ defaults: [
+ "art_debug_defaults",
+ "jitload-defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ "libdexfiled",
+ "libprofiled",
+ "libartbased",
+ ],
+}
+
+//art_cc_test {
+// name: "art_titrace_tests",
+// defaults: [
+// "art_gtest_defaults",
+// ],
+// srcs: ["titrace_test.cc"],
+//}
diff --git a/tools/jit-load/README.md b/tools/jit-load/README.md
new file mode 100644
index 0000000..8aa4513
--- /dev/null
+++ b/tools/jit-load/README.md
@@ -0,0 +1,35 @@
+# jitload
+
+Jitload is an art-specific agent that counts the number of classes loaded on
+the jit-thread, or verifies that none were.
+
+# Usage
+### Build
+> `make libjitload` # or 'make libjitloadd' with debugging checks enabled
+
+The libraries will be built for 32-bit, 64-bit, host and target. The examples below assume you want to use the 64-bit version.
+### Command Line
+
+> `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitload.so -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti and agent.
+* Pass the '=fatal' option to the agent to cause it to abort if any classes are
+ loaded on a jit thread. Otherwise a warning will be printed.
+
+> `art -d -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmtid.so -agentpath:$ANDROID_HOST_OUT/lib64/libjitloadd.so=fatal -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* To use with run-test or testrunner.py use the --with-agent argument.
+
+> `./test/run-test --host --with-agent libjitloadd.so=fatal 001-HelloWorld`
+
+
+### Printing the Results
+All statistics gathered during the trace are printed automatically when the
+program exits normally. Android applications are always killed rather than
+allowed to exit, so the results need to be printed manually.
+
+> `kill -SIGQUIT $(pid com.example.android.displayingbitmaps)`
+
+Will initiate a dump of the counts (to logcat).
+
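As a sketch only (the real handling lives in `jitload.cc` below, using android-base logging), the '=fatal' behavior described above amounts to parsing the option string and choosing between aborting and warning; `ReportClassLoadOnJitThread` is a hypothetical stand-in:

```cpp
#include <cstdlib>
#include <cstring>
#include <iostream>

// Hedged sketch, not the agent itself: an "=fatal" style option selects between
// aborting and merely warning when a class load is seen on the jit thread.
static void ReportClassLoadOnJitThread(const char* options, const char* klass) {
  const bool fatal = (options != nullptr && std::strcmp(options, "fatal") == 0);
  std::cerr << "Loaded " << klass << " on jit thread!" << std::endl;
  if (fatal) {
    std::abort();  // '=fatal' was passed: treat this as a hard failure.
  }
}

int main() {
  ReportClassLoadOnJitThread("", "Ljava/lang/Object;");  // warns only
  // ReportClassLoadOnJitThread("fatal", "Ljava/lang/Object;");  // would abort
  return 0;
}
```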
diff --git a/tools/jit-load/jitload.cc b/tools/jit-load/jitload.cc
new file mode 100644
index 0000000..7e715de
--- /dev/null
+++ b/tools/jit-load/jitload.cc
@@ -0,0 +1,144 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <jni.h>
+#include <jvmti.h>
+
+#include "base/runtime_debug.h"
+#include "jit/jit.h"
+#include "runtime-inl.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace jitload {
+
+// Special env version that allows JVMTI-like access on userdebug builds.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+#define CHECK_CALL_SUCCESS(c) \
+ do { \
+ auto vc = (c); \
+ CHECK(vc == JNI_OK || vc == JVMTI_ERROR_NONE) << "call " << #c << " did not succeed\n"; \
+ } while (false)
+
+static jthread GetJitThread() {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ auto* jit = art::Runtime::Current()->GetJit();
+ if (jit == nullptr) {
+ return nullptr;
+ }
+ auto* thread_pool = jit->GetThreadPool();
+ if (thread_pool == nullptr) {
+ return nullptr;
+ }
+ // Currently we only have a single jit thread so we only look at that one.
+ return soa.AddLocalReference<jthread>(
+ thread_pool->GetWorkers()[0]->GetThread()->GetPeerFromOtherThread());
+}
+
+JNICALL void VmInitCb(jvmtiEnv* jvmti,
+ JNIEnv* env ATTRIBUTE_UNUSED,
+ jthread curthread ATTRIBUTE_UNUSED) {
+ jthread jit_thread = GetJitThread();
+ if (jit_thread != nullptr) {
+ CHECK_EQ(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_CLASS_PREPARE, jit_thread),
+ JVMTI_ERROR_NONE);
+ }
+}
+
+struct AgentOptions {
+ bool fatal;
+ uint64_t cnt;
+};
+
+JNICALL static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+ AgentOptions* ops;
+ CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+ LOG(WARNING) << "Jit thread has loaded " << ops->cnt << " classes";
+}
+
+JNICALL void ClassPrepareJit(jvmtiEnv* jvmti,
+ JNIEnv* jni_env ATTRIBUTE_UNUSED,
+ jthread thr ATTRIBUTE_UNUSED,
+ jclass klass) {
+ AgentOptions* ops;
+ CHECK_CALL_SUCCESS(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&ops)));
+ char* klass_name;
+ CHECK_CALL_SUCCESS(jvmti->GetClassSignature(klass, &klass_name, nullptr));
+ (ops->fatal ? LOG_STREAM(FATAL)
+ : LOG_STREAM(WARNING)) << "Loaded " << klass_name << " on jit thread!";
+ ops->cnt++;
+ CHECK_CALL_SUCCESS(jvmti->Deallocate(reinterpret_cast<unsigned char*>(klass_name)));
+}
+
+JNICALL void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+ DataDumpRequestCb(jvmti);
+}
+
+static jvmtiEnv* SetupJvmti(JavaVM* vm, const char* options) {
+ android::base::InitLogging(/* argv= */nullptr);
+
+ jvmtiEnv* jvmti = nullptr;
+ if (vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_0) != JNI_OK &&
+ vm->GetEnv(reinterpret_cast<void**>(&jvmti), kArtTiVersion) != JNI_OK) {
+ LOG(FATAL) << "Unable to setup JVMTI environment!";
+ }
+ jvmtiEventCallbacks cb {
+ .VMInit = VmInitCb,
+ .ClassPrepare = ClassPrepareJit,
+ .DataDumpRequest = DataDumpRequestCb,
+ .VMDeath = VMDeathCb,
+ };
+ AgentOptions* ops;
+ CHECK_CALL_SUCCESS(
+ jvmti->Allocate(sizeof(AgentOptions), reinterpret_cast<unsigned char**>(&ops)));
+ ops->fatal = (strcmp(options, "fatal") == 0);
+ ops->cnt = 0;
+ CHECK_CALL_SUCCESS(jvmti->SetEnvironmentLocalStorage(ops));
+ CHECK_CALL_SUCCESS(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+ CHECK_CALL_SUCCESS(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+ CHECK_CALL_SUCCESS(
+ jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr));
+ return jvmti;
+}
+
+// Early attachment (e.g. 'java -agent[lib|path]:filename.so').
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* /* reserved */) {
+ SetupJvmti(vm, options);
+ return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* /* reserved */) {
+ jvmtiEnv* jvmti = SetupJvmti(vm, options);
+
+ JNIEnv* jni = nullptr;
+ jthread thr = nullptr;
+ CHECK_CALL_SUCCESS(vm->GetEnv(reinterpret_cast<void**>(&jni), JNI_VERSION_1_6));
+ CHECK_CALL_SUCCESS(jvmti->GetCurrentThread(&thr));
+
+ // Final setup is done in the VmInitCb.
+ VmInitCb(jvmti, jni, thr);
+
+ jni->DeleteLocalRef(thr);
+ return JNI_OK;
+}
+
+#undef CHECK_CALL_SUCCESS
+
+} // namespace jitload
+
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 3ef78d5..a5fa332 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -216,16 +216,6 @@
names: ["libcore.javax.crypto.spec.AlgorithmParametersTestGCM#testEncoding"]
},
{
- description: "Tests fail because mockito can not read android.os.Build$VERSION",
- result: EXEC_FAILED,
- bug: 111704422,
- names: ["libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_calledBeforeDefaultHandler",
- "libcore.java.lang.ThreadTest#testUncaughtExceptionPreHandler_noDefaultHandler",
- "libcore.javax.crypto.CipherInputStreamTest#testCloseTwice",
- "libcore.libcore.io.BlockGuardOsTest#test_android_getaddrinfo_networkPolicy",
- "libcore.libcore.io.BlockGuardOsTest#test_checkNewMethodsInPosix"]
-},
-{
description: "fdsan doesn't exist on the host",
result: EXEC_FAILED,
modes: [host],
@@ -233,6 +223,15 @@
names: ["libcore.libcore.io.FdsanTest#testFileInputStream",
"libcore.libcore.io.FdsanTest#testFileOutputStream",
"libcore.libcore.io.FdsanTest#testRandomAccessFile",
- "libcore.libcore.io.FdsanTest#testParcelFileDescriptor"]
+ "libcore.libcore.io.FdsanTest#testParcelFileDescriptor",
+ "libcore.libcore.io.FdsanTest#testDatagramSocket",
+ "libcore.libcore.io.FdsanTest#testSocket"]
+},
+{
+ description: "Timeout on heap-poisoning target builds",
+ result: EXEC_FAILED,
+ modes: [device],
+ bug: 116446372,
+ names: ["libcore.libcore.io.FdsanTest#testSocket"]
}
]
diff --git a/tools/libcore_gcstress_failures.txt b/tools/libcore_gcstress_failures.txt
index 965e85c..fff1c70 100644
--- a/tools/libcore_gcstress_failures.txt
+++ b/tools/libcore_gcstress_failures.txt
@@ -29,6 +29,7 @@
modes: [device],
names: ["libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_remove",
+ "org.apache.harmony.tests.java.text.DateFormatTest#test_getAvailableLocales",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
"org.apache.harmony.tests.java.util.WeakHashMapTest#test_keySet_hasNext",
"libcore.java.text.DecimalFormatTest#testCurrencySymbolSpacing",
diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg
new file mode 100644
index 0000000..29cca39
--- /dev/null
+++ b/tools/luci/config/cr-buildbucket.cfg
@@ -0,0 +1,130 @@
+# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
+# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
+#
+# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
+# schema of this file and documentation.
+#
+# Please keep this list sorted by bucket name.
+acl_sets {
+ name: "ci"
+ acls {
+ role: READER
+ group: "all"
+ }
+ acls {
+ role: WRITER
+ group: "project-art-admins"
+ }
+ acls {
+ role: SCHEDULER
+ identity: "luci-scheduler@appspot.gserviceaccount.com"
+ }
+}
+
+buckets {
+ name: "luci.art.ci"
+ acl_sets: "ci"
+ swarming {
+ hostname: "chromium-swarm.appspot.com"
+ builder_defaults {
+ dimensions: "cores:8"
+ dimensions: "cpu:x86-64"
+ dimensions: "pool:luci.art.ci"
+ service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+ execution_timeout_secs: 10800 # 3h
+ swarming_tags: "vpython:native-python-wrapper"
+ build_numbers: YES
+ # Some builders require specific hardware, so we make the assignment in bots.cfg
+ auto_builder_dimension: YES
+ luci_migration_host: "luci-migration.appspot.com"
+ recipe {
+ cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
+ cipd_version: "refs/heads/master"
+ name: "art"
+ properties: "mastername:client.art"
+ }
+ }
+
+ builders {
+ name: "angler-armv7-debug"
+ }
+ builders {
+ name: "angler-armv7-generational-cc"
+ }
+ builders {
+ name: "angler-armv7-ndebug"
+ }
+ builders {
+ name: "angler-armv8-debug"
+ }
+ builders {
+ name: "angler-armv8-generational-cc"
+ }
+ builders {
+ name: "angler-armv8-ndebug"
+ }
+ builders {
+ name: "aosp-builder-cc"
+ }
+ builders {
+ name: "aosp-builder-cms"
+ }
+ builders {
+ name: "bullhead-armv7-gcstress-ndebug"
+ }
+ builders {
+ name: "bullhead-armv8-gcstress-debug"
+ }
+ builders {
+ name: "bullhead-armv8-gcstress-ndebug"
+ }
+ builders {
+ name: "fugu-debug"
+ }
+ builders {
+ name: "fugu-ndebug"
+ }
+ builders {
+ name: "host-x86-cms"
+ }
+ builders {
+ name: "host-x86-debug"
+ }
+ builders {
+ name: "host-x86-gcstress-debug"
+ }
+ builders {
+ name: "host-x86-ndebug"
+ }
+ builders {
+ name: "host-x86-poison-debug"
+ }
+ builders {
+ name: "host-x86_64-cdex-fast"
+ }
+ builders {
+ name: "host-x86_64-cms"
+ }
+ builders {
+ name: "host-x86_64-debug"
+ }
+ builders {
+ name: "host-x86_64-generational-cc"
+ }
+ builders {
+ name: "host-x86_64-ndebug"
+ }
+ builders {
+ name: "host-x86_64-poison-debug"
+ }
+ builders {
+ name: "volantis-armv7-poison-debug"
+ }
+ builders {
+ name: "volantis-armv8-poison-debug"
+ }
+ builders {
+ name: "volantis-armv8-poison-ndebug"
+ }
+ }
+}
diff --git a/tools/luci/config/luci-logdog.cfg b/tools/luci/config/luci-logdog.cfg
new file mode 100644
index 0000000..e910bc3
--- /dev/null
+++ b/tools/luci/config/luci-logdog.cfg
@@ -0,0 +1,18 @@
+# For the schema of this file and documentation, see ProjectConfig message in
+# https://luci-config.appspot.com/schemas/services/luci-logdog:logdog.cfg
+# This is for the art project, but we're going to piggyback
+# off of the chromium settings.
+
+# Auth groups who can read log streams.
+# Currently, all projects with "all" (aka public) read/write permissions use
+# the Chromium auth group and buckets.
+reader_auth_groups: "all"
+
+# Auth groups who can register and emit new log streams.
+# These are bots that emit logs.
+writer_auth_groups: "luci-logdog-chromium-writers"
+
+# The base Google Storage archival path for this project.
+#
+# Archived LogDog logs will be written to this bucket/path.
+archive_gs_bucket: "chromium-luci-logdog"
diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg
new file mode 100644
index 0000000..ce22293
--- /dev/null
+++ b/tools/luci/config/luci-milo.cfg
@@ -0,0 +1,145 @@
+logo_url: "https://storage.googleapis.com/chrome-infra-public/logo/art-logo.png"
+
+consoles {
+ id: "main"
+ name: "ART Main Console"
+ repo_url: "https://android.googlesource.com/platform/art"
+ refs: "refs/heads/master"
+ manifest_name: "REVISION"
+
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv7-debug"
+ category: "angler|armv7"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv7-generational-cc"
+ category: "angler|armv7"
+ short_name: "gen"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv7-ndebug"
+ category: "angler|armv7"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv8-debug"
+ category: "angler|armv8"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv8-generational-cc"
+ category: "angler|armv8"
+ short_name: "gen"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/angler-armv8-ndebug"
+ category: "angler|armv8"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/aosp-builder-cc"
+ category: "aosp"
+ short_name: "cc"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/aosp-builder-cms"
+ category: "aosp"
+ short_name: "cms"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/bullhead-armv7-gcstress-ndebug"
+ category: "bullhead|armv7|gcstress"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-debug"
+ category: "bullhead|armv8|gcstress"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-ndebug"
+ category: "bullhead|armv8|gcstress"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/fugu-debug"
+ category: "fugu"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/fugu-ndebug"
+ category: "fugu"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86-cms"
+ category: "host|x86"
+ short_name: "cms"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86-debug"
+ category: "host|x86"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86-ndebug"
+ category: "host|x86"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86-gcstress-debug"
+ category: "host|x86"
+ short_name: "gcs"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86-poison-debug"
+ category: "host|x86"
+ short_name: "psn"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-cdex-fast"
+ category: "host|x64"
+ short_name: "cdx"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-cms"
+ category: "host|x64"
+ short_name: "cms"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-debug"
+ category: "host|x64"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-generational-cc"
+ category: "host|x64"
+ short_name: "gen"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-ndebug"
+ category: "host|x64"
+ short_name: "ndbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/host-x86_64-poison-debug"
+ category: "host|x64"
+ short_name: "psn"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/volantis-armv7-poison-debug"
+ category: "volantis|armv7|poison"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/volantis-armv8-poison-debug"
+ category: "volantis|armv8|poison"
+ short_name: "dbg"
+ }
+ builders {
+ name: "buildbucket/luci.art.ci/volantis-armv8-poison-ndebug"
+ category: "volantis|armv8|poison"
+ short_name: "ndbg"
+ }
+}
diff --git a/tools/luci/config/luci-scheduler.cfg b/tools/luci/config/luci-scheduler.cfg
new file mode 100644
index 0000000..717daa5
--- /dev/null
+++ b/tools/luci/config/luci-scheduler.cfg
@@ -0,0 +1,325 @@
+# Defines jobs on luci-scheduler.appspot.com.
+#
+# For schema of this file and documentation see ProjectConfig message in
+#
+# https://chromium.googlesource.com/infra/luci/luci-go/+/master/scheduler/appengine/messages/config.proto
+
+acl_sets {
+ name: "default"
+ acls {
+ role: READER
+ granted_to: "group:all"
+ }
+ acls {
+ role: OWNER
+ granted_to: "group:project-art-admins"
+ }
+}
+
+trigger {
+ id: "master-gitiles-trigger"
+ acl_sets: "default"
+ gitiles: {
+ repo: "https://android.googlesource.com/platform/art"
+ refs: "refs/heads/master"
+ }
+
+ triggers: "angler-armv7-debug"
+ triggers: "angler-armv7-generational-cc"
+ triggers: "angler-armv7-ndebug"
+ triggers: "angler-armv8-debug"
+ triggers: "angler-armv8-generational-cc"
+ triggers: "angler-armv8-ndebug"
+ triggers: "aosp-builder-cc"
+ triggers: "aosp-builder-cms"
+ triggers: "bullhead-armv7-gcstress-ndebug"
+ triggers: "bullhead-armv8-gcstress-debug"
+ triggers: "bullhead-armv8-gcstress-ndebug"
+ triggers: "fugu-debug"
+ triggers: "fugu-ndebug"
+ triggers: "host-x86-cms"
+ triggers: "host-x86-debug"
+ triggers: "host-x86-gcstress-debug"
+ triggers: "host-x86-ndebug"
+ triggers: "host-x86-poison-debug"
+ triggers: "host-x86_64-cdex-fast"
+ triggers: "host-x86_64-cms"
+ triggers: "host-x86_64-debug"
+ triggers: "host-x86_64-generational-cc"
+ triggers: "host-x86_64-ndebug"
+ triggers: "host-x86_64-poison-debug"
+ triggers: "volantis-armv7-poison-debug"
+ triggers: "volantis-armv8-poison-debug"
+ triggers: "volantis-armv8-poison-ndebug"
+}
+
+job {
+ id: "angler-armv7-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv7-debug"
+ }
+}
+
+job {
+ id: "angler-armv7-generational-cc"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv7-generational-cc"
+ }
+}
+
+job {
+ id: "angler-armv7-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv7-ndebug"
+ }
+}
+
+job {
+ id: "angler-armv8-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv8-debug"
+ }
+}
+
+job {
+ id: "angler-armv8-generational-cc"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv8-generational-cc"
+ }
+}
+
+job {
+ id: "angler-armv8-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "angler-armv8-ndebug"
+ }
+}
+
+job {
+ id: "aosp-builder-cc"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "aosp-builder-cc"
+ }
+}
+
+job {
+ id: "aosp-builder-cms"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "aosp-builder-cms"
+ }
+}
+
+job {
+ id: "bullhead-armv7-gcstress-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "bullhead-armv7-gcstress-ndebug"
+ }
+}
+
+job {
+ id: "bullhead-armv8-gcstress-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "bullhead-armv8-gcstress-debug"
+ }
+}
+
+job {
+ id: "bullhead-armv8-gcstress-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "bullhead-armv8-gcstress-ndebug"
+ }
+}
+
+job {
+ id: "fugu-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "fugu-debug"
+ }
+}
+
+job {
+ id: "fugu-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "fugu-ndebug"
+ }
+}
+
+job {
+ id: "host-x86-cms"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86-cms"
+ }
+}
+
+job {
+ id: "host-x86-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86-debug"
+ }
+}
+
+job {
+ id: "host-x86-gcstress-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86-gcstress-debug"
+ }
+}
+
+job {
+ id: "host-x86-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86-ndebug"
+ }
+}
+
+job {
+ id: "host-x86-poison-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86-poison-debug"
+ }
+}
+
+job {
+ id: "host-x86_64-cdex-fast"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-cdex-fast"
+ }
+}
+
+job {
+ id: "host-x86_64-cms"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-cms"
+ }
+}
+
+job {
+ id: "host-x86_64-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-debug"
+ }
+}
+
+job {
+ id: "host-x86_64-generational-cc"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-generational-cc"
+ }
+}
+
+job {
+ id: "host-x86_64-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-ndebug"
+ }
+}
+
+job {
+ id: "host-x86_64-poison-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "host-x86_64-poison-debug"
+ }
+}
+
+job {
+ id: "volantis-armv7-poison-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "volantis-armv7-poison-debug"
+ }
+}
+
+job {
+ id: "volantis-armv8-poison-debug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "volantis-armv8-poison-debug"
+ }
+}
+
+job {
+ id: "volantis-armv8-poison-ndebug"
+ acl_sets: "default"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "luci.art.ci"
+ builder: "volantis-armv8-poison-ndebug"
+ }
+}
+
diff --git a/tools/luci/config/project.cfg b/tools/luci/config/project.cfg
new file mode 100644
index 0000000..41d172d
--- /dev/null
+++ b/tools/luci/config/project.cfg
@@ -0,0 +1,4 @@
+# For the schema of this file and documentation, see ProjectCfg message in
+# https://luci-config.appspot.com/schemas/projects:project.cfg
+name: "art"
+access: "group:all" # public
diff --git a/tools/prebuilt_libjdwp_art_failures.txt b/tools/prebuilt_libjdwp_art_failures.txt
index 2664560..ee59315 100644
--- a/tools/prebuilt_libjdwp_art_failures.txt
+++ b/tools/prebuilt_libjdwp_art_failures.txt
@@ -10,101 +10,101 @@
description: "Test fails due to unexpectedly getting the thread-groups of zombie threads",
result: EXEC_FAILED,
bug: 66906414,
- name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference.ThreadGroup002Test#testThreadGroup002"
+ name: "org.apache.harmony.jpda.tests.jdwp.ThreadReference_ThreadGroup002Test#testThreadGroup002"
},
{
description: "Test fails due to modifiers not including ACC_SUPER",
result: EXEC_FAILED,
bug: 66906055,
- name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.ModifiersTest#testModifiers001"
+ name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_ModifiersTest#testModifiers001"
},
{
description: "Test fails due to static values not being set correctly.",
result: EXEC_FAILED,
bug: 66905894,
- name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues006Test#testGetValues006"
+ name: "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues006Test#testGetValues006"
},
{
description: "Tests fail with assertion error on slot number",
result: EXEC_FAILED,
bug: 66905468,
- names: [ "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableTest#testVariableTableTest001",
- "org.apache.harmony.jpda.tests.jdwp.Method.VariableTableWithGenericTest#testVariableTableWithGenericTest001" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.Method_VariableTableTest#testVariableTableTest001",
+ "org.apache.harmony.jpda.tests.jdwp.Method_VariableTableWithGenericTest#testVariableTableWithGenericTest001" ]
},
{
description: "Test fails with assertion error 'Invalid Path' for class path.",
result: EXEC_FAILED,
bug: 66904994,
- name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ClassPathsTest#testClassPaths001"
+ name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_ClassPathsTest#testClassPaths001"
},
{
description: "Test fails with Error VM_DEAD when trying to resume during VM_DEATH event",
result: EXEC_FAILED,
bug: 66904725,
- name: "org.apache.harmony.jpda.tests.jdwp.Events.VMDeath002Test#testVMDeathRequest"
+ name: "org.apache.harmony.jpda.tests.jdwp.Events_VMDeath002Test#testVMDeathRequest"
},
{
description: "Test fails with OPAQUE_FRAME error due to attempting a GetLocalReference on a proxy frame instead of GetLocalInstance!",
result: EXEC_FAILED,
bug: 66903662,
- name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ProxyThisObjectTest#testThisObject"
+ name: "org.apache.harmony.jpda.tests.jdwp.StackFrame_ProxyThisObjectTest#testThisObject"
},
{
description: "Test fails with unexpected TYPE_MISMATCH error",
result: EXEC_FAILED,
bug: 66904008,
- name: "org.apache.harmony.jpda.tests.jdwp.StackFrame.ThisObjectTest#testThisObjectTest001"
+ name: "org.apache.harmony.jpda.tests.jdwp.StackFrame_ThisObjectTest#testThisObjectTest001"
},
{
description: "Tests that fail only on ART with INVALID_SLOT error",
result: EXEC_FAILED,
bug: 66903181,
- names: [ "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testBreakpoint",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testException",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testFieldAccess",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testFieldModification",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodEntry",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExit",
- "org.apache.harmony.jpda.tests.jdwp.EventModifiers.InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testBreakpoint",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testException",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testFieldAccess",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testFieldModification",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodEntry",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodExit",
+ "org.apache.harmony.jpda.tests.jdwp.EventModifiers_InstanceOnlyModifierTest#testMethodExitWithReturnValue" ]
},
{
description: "Tests for VMDebug functionality not implemented in the upstream libjdwp",
result: EXEC_FAILED,
- names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest#testVMDebug",
- "org.apache.harmony.jpda.tests.jdwp.VMDebug.VMDebugTest002#testVMDebug" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest#testVMDebug",
+ "org.apache.harmony.jpda.tests.jdwp.VMDebug_VMDebugTest002#testVMDebug" ]
},
/* TODO Categorize these failures more. */
{
description: "Tests that fail on both ART and RI. These tests are likely incorrect",
result: EXEC_FAILED,
bug: 66906734,
- names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference.SetValues003Test#testSetValues003_InvalidIndex",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod002",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.InvokeMethodTest#testInvokeMethod003",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.NewInstanceTest#testNewInstance002",
- "org.apache.harmony.jpda.tests.jdwp.ClassType.SetValues002Test#testSetValues002",
- "org.apache.harmony.jpda.tests.jdwp.Events.ClassPrepare002Test#testClassPrepareCausedByDebugger",
- "org.apache.harmony.jpda.tests.jdwp.Events.ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.DisableCollectionTest#testDisableCollection_null",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_invalid",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection_null",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.GetValues002Test#testGetValues002",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValues003Test#testSetValues003",
- "org.apache.harmony.jpda.tests.jdwp.ObjectReference.SetValuesTest#testSetValues001",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.FieldsWithGenericTest#testFieldsWithGeneric001",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues002Test#testGetValues002",
- "org.apache.harmony.jpda.tests.jdwp.ReferenceType.GetValues004Test#testGetValues004",
- "org.apache.harmony.jpda.tests.jdwp.StringReference.ValueTest#testStringReferenceValueTest001_NullString",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ChildrenTest#testChildren_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.NameTest#testName001_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference.ParentTest#testParent_NullObject",
- "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.CapabilitiesNewTest#testCapabilitiesNew001" ]
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ArrayReference_SetValues003Test#testSetValues003_InvalidIndex",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethod002Test#testInvokeMethod_wrong_argument_types",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_InvokeMethodTest#testInvokeMethod003",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_NewInstanceTest#testNewInstance002",
+ "org.apache.harmony.jpda.tests.jdwp.ClassType_SetValues002Test#testSetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.Events_ClassPrepare002Test#testClassPrepareCausedByDebugger",
+ "org.apache.harmony.jpda.tests.jdwp.Events_ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_DisableCollectionTest#testDisableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_invalid",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_null",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValues003Test#testSetValues003",
+ "org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValuesTest#testSetValues001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_FieldsWithGenericTest#testFieldsWithGeneric001",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues002Test#testGetValues002",
+ "org.apache.harmony.jpda.tests.jdwp.ReferenceType_GetValues004Test#testGetValues004",
+ "org.apache.harmony.jpda.tests.jdwp.StringReference_ValueTest#testStringReferenceValueTest001_NullString",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ChildrenTest#testChildren_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_NameTest#testName001_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ParentTest#testParent_NullObject",
+ "org.apache.harmony.jpda.tests.jdwp.VirtualMachine_CapabilitiesNewTest#testCapabilitiesNew001" ]
},
{
description: "Test for ddms extensions that are not implemented for prebuilt-libjdwp",
result: EXEC_FAILED,
bug: 69169846,
- name: "org.apache.harmony.jpda.tests.jdwp.DDM.DDMTest#testChunk001"
+ name: "org.apache.harmony.jpda.tests.jdwp.DDM_DDMTest#testChunk001"
},
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index b0b5810..20e5c64 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -260,8 +260,8 @@
# we don't want to be trying to connect to adbconnection which might not have
# been built.
vm_args="${vm_args} --vm-arg -XjdwpProvider:none"
- # Make sure the debuggee doesn't clean up what the debugger has generated.
- art_debugee="$art_debugee --no-clean"
+ # Make sure the debuggee doesn't re-generate, nor clean up what the debugger has generated.
+ art_debugee="$art_debugee --no-compile --no-clean"
fi
function jlib_name {
@@ -311,16 +311,16 @@
vm_args="$vm_args --vm-arg $plugin"
fi
-# Because we're running debuggable, we discard any AOT code.
-# Therefore we run de2oat with 'quicken' to avoid spending time compiling.
-vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
-debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
-
-if $instant_jit; then
- debuggee_args="$debuggee_args -Xjitthreshold:0"
-fi
-
if [[ $mode != "ri" ]]; then
+ # Because we're running debuggable, we discard any AOT code.
+ # Therefore we run dex2oat with 'quicken' to avoid spending time compiling.
+ vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
+ debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
+
+ if $instant_jit; then
+ debuggee_args="$debuggee_args -Xjitthreshold:0"
+ fi
+
vm_args="$vm_args --vm-arg -Xusejit:$use_jit"
debuggee_args="$debuggee_args -Xusejit:$use_jit"
fi
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 04e80df..ef958d6 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -43,7 +43,7 @@
# Kill logd first, so that when we set the adb buffer size later in this file,
# it is brought up again.
echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
-adb shell killall -9 /system/bin/logd
+adb shell pkill -9 -U logd logd && echo -e "${green}...logd killed${nc}"
# Update date on device if the difference with host is more than one hour.
if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
diff --git a/tools/ti-fast/README.md b/tools/ti-fast/README.md
index bc46882..a0a7dd7 100644
--- a/tools/ti-fast/README.md
+++ b/tools/ti-fast/README.md
@@ -21,6 +21,10 @@
called. This behavior is static. The no-log methods have no branches and just
immediately return.
+* If 'all' is one of the arguments, the agent listens for every event the current
+  runtime is capable of providing, and all other arguments (except 'log') are
+  ignored.
+
* The event-names are the same names as are used in the jvmtiEventCallbacks
struct.
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
index b147add..00ef656 100644
--- a/tools/ti-fast/tifast.cc
+++ b/tools/ti-fast/tifast.cc
@@ -36,6 +36,13 @@
// env.
static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+template <typename ...Args> static void Unused(Args... args ATTRIBUTE_UNUSED) {}
+
+// jthread is a typedef of jobject so we use this to allow the templates to distinguish them.
+struct jthreadContainer { jthread thread; };
+// jlocation is a typedef of jlong so use this to distinguish the less common jlong.
+struct jlongContainer { jlong val; };
+
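The two container structs work around a C++ overload-resolution limitation: since jthread is a plain typedef of jobject (and jlocation of jlong), overloads taking the aliased types would have identical signatures. Below is a minimal standalone sketch of the idea, using stand-in names (fake_jobject, fake_jthread, ThreadWrapper) rather than the real JNI headers:

#include <iostream>

// Minimal sketch, not part of the patch: stand-in typedefs that mirror how jthread is
// just a typedef of jobject in the JNI headers.
using fake_jobject = void*;
using fake_jthread = fake_jobject;  // same underlying type, so overloads cannot tell them apart

struct ThreadWrapper { fake_jthread thread; };  // plays the role of jthreadContainer

// Print(fake_jobject) and Print(fake_jthread) would be the same signature and could not
// coexist; wrapping one side in a struct restores a distinct type for overload resolution.
void Print(fake_jobject) { std::cout << "jobject overload\n"; }
void Print(ThreadWrapper) { std::cout << "jthread overload\n"; }

int main() {
  fake_jobject obj = nullptr;
  Print(obj);                               // picks the jobject overload
  Print(ThreadWrapper{.thread = nullptr});  // picks the jthread overload
  return 0;
}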
static void AddCapsForEvent(jvmtiEvent event, jvmtiCapabilities* caps) {
switch (event) {
#define DO_CASE(name, cap_name) \
@@ -63,59 +70,520 @@
}
// Setup for all supported events. Give a macro with fun(name, event_num, args)
-#define FOR_ALL_SUPPORTED_EVENTS(fun) \
- fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation)) \
- fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv*, JNIEnv*, jthread, jmethodID)) \
- fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jboolean, jvalue)) \
- fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, void*, void**)) \
- fun(Exception, EVENT(EXCEPTION), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject, jmethodID, jlocation)) \
- fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv*, JNIEnv*, jthread, jmethodID, jlocation, jobject)) \
- fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv*, JNIEnv*, jthread)) \
- fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv*, JNIEnv*, jthread)) \
- fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
- fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv*, JNIEnv*, jthread, jclass)) \
- fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv*, JNIEnv*, jclass, jobject, const char*, jobject, jint, const unsigned char*, jint*, unsigned char**)) \
- fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv*, jmethodID, jint, const void*, jint, const jvmtiAddrLocationMap*, const void*)) \
- fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv*, jmethodID, const void*)) \
- fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv*, const char*, const void*, jint)) \
- fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv*)) \
- fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
- fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv*, JNIEnv*, jthread, jobject)) \
- fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv*, JNIEnv*, jthread, jobject, jlong)) \
- fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv*, JNIEnv*, jthread, jobject, jboolean)) \
- fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv*, JNIEnv*, jint, const void*, const char*)) \
- fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv*, JNIEnv*, jthread, jobject, jclass, jlong)) \
- fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv*)) \
- fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv*))
+#define FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
+ fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc)) \
+ fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth), (jvmti, jni, jthreadContainer{.thread = thread}, meth)) \
+ fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jboolean jb, jvalue jv), (jvmti, jni, jthreadContainer{.thread = thread}, meth, jb, jv)) \
+ fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, void* v1, void** v2), (jvmti, jni, jthreadContainer{.thread = thread}, meth, v1, v2)) \
+ fun(Exception, EVENT(EXCEPTION), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth1, jlocation loc1, jobject obj, jmethodID meth2, jlocation loc2), (jvmti, jni, jthreadContainer{.thread = thread}, meth1, loc1, obj, meth2, loc2)) \
+ fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc, obj)) \
+ fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+ fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+ fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass) ) \
+ fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass)) \
+ fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, jobject obj1, const char* c1, jobject obj2, jint i1, const unsigned char* c2, jint* ip1, unsigned char** cp1), (jvmti, jni, klass, obj1, c1, obj2, i1, c2, ip1, cp1)) \
+ fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+ fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+ fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, jlongContainer{.val = l1})) \
+ fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jboolean b1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, b1)) \
+ fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv* jvmti, JNIEnv* jni, jint i1, const void* cv, const char* cc), (jvmti, jni, i1, cv, cc)) \
+ fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jclass klass, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, klass, jlongContainer{.val = l1})) \
-#define GENERATE_EMPTY_FUNCTION(name, number, args) \
- static void JNICALL empty ## name args { }
+#define FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun) \
+ fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv* jvmti, jmethodID meth, jint i1, const void* cv1, jint i2, const jvmtiAddrLocationMap* alm, const void* cv2), (jvmti, meth, i1, cv1, i2, alm, cv2)) \
+ fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv* jvmti, jmethodID meth, const void* cv1), (jvmti, meth, cv1)) \
+ fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv* jvmti, const char* cc, const void* cv, jint i1), (jvmti, cc, cv, i1)) \
+ fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv* jvmti), (jvmti)) \
+ fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv* jvmti), (jvmti)) \
+ fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv* jvmti), (jvmti))
+
+#define FOR_ALL_SUPPORTED_EVENTS(fun) \
+ FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
+ FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun)
+
+static const jvmtiEvent kAllEvents[] = {
+#define GET_EVENT(a, event, b, c) event,
+FOR_ALL_SUPPORTED_EVENTS(GET_EVENT)
+#undef GET_EVENT
+};
+
+#define GENERATE_EMPTY_FUNCTION(name, number, args, argnames) \
+ static void JNICALL empty ## name args { Unused argnames ; }
FOR_ALL_SUPPORTED_EVENTS(GENERATE_EMPTY_FUNCTION)
#undef GENERATE_EMPTY_FUNCTION
static jvmtiEventCallbacks kEmptyCallbacks {
-#define CREATE_EMPTY_EVENT_CALLBACKS(name, num, args) \
+#define CREATE_EMPTY_EVENT_CALLBACKS(name, num, args, argnames) \
.name = empty ## name,
FOR_ALL_SUPPORTED_EVENTS(CREATE_EMPTY_EVENT_CALLBACKS)
#undef CREATE_EMPTY_EVENT_CALLBACKS
};
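Each row of FOR_ALL_SUPPORTED_EVENTS is expanded twice: once into an empty callback (for the ThreadStart row, GENERATE_EMPTY_FUNCTION yields an emptyThreadStart function whose body is Unused(jvmti, jni, jthreadContainer{.thread = thread});) and once into the matching designated initializer (.ThreadStart = emptyThreadStart,). The following self-contained sketch, with assumed names (FOR_ALL_COLORS, GENERATE_HANDLER, MAKE_ENTRY), shows the same X-macro technique in isolation:

#include <iostream>

// Self-contained sketch of the X-macro pattern used above; the real rows also carry the
// JVMTI signature and argument-forwarding tuples as extra macro parameters.
#define FOR_ALL_COLORS(fun) \
  fun(Red, 0)               \
  fun(Green, 1)             \
  fun(Blue, 2)

// One expansion pass generates a handler per row...
#define GENERATE_HANDLER(name, number) \
  static void Handle##name() { std::cout << "handling " #name "\n"; }
FOR_ALL_COLORS(GENERATE_HANDLER)
#undef GENERATE_HANDLER

// ...and a second pass over the same list builds a table referring to those handlers.
struct Entry { int number; void (*handler)(); };
static const Entry kTable[] = {
#define MAKE_ENTRY(name, number) { number, Handle##name },
FOR_ALL_COLORS(MAKE_ENTRY)
#undef MAKE_ENTRY
};

int main() {
  for (const Entry& e : kTable) {
    e.handler();  // prints "handling Red", "handling Green", "handling Blue"
  }
  return 0;
}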
-#define GENERATE_LOG_FUNCTION(name, number, args) \
- static void JNICALL log ## name args { \
- LOG(INFO) << "Got event " << #name ; \
+static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+ if (obj != nullptr && env != nullptr) {
+ env->DeleteLocalRef(obj);
+ }
+}
+
+class ScopedThreadInfo {
+ public:
+ ScopedThreadInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jthread thread)
+ : jvmtienv_(jvmtienv), env_(env), free_name_(false) {
+ if (thread == nullptr) {
+ info_.name = const_cast<char*>("<NULLPTR>");
+ } else if (jvmtienv->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+ info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+ } else {
+ free_name_ = true;
}
-FOR_ALL_SUPPORTED_EVENTS(GENERATE_LOG_FUNCTION)
+ }
+
+ ~ScopedThreadInfo() {
+ if (free_name_) {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+ }
+ DeleteLocalRef(env_, info_.thread_group);
+ DeleteLocalRef(env_, info_.context_class_loader);
+ }
+
+ const char* GetName() const {
+ return info_.name;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ bool free_name_;
+ jvmtiThreadInfo info_{};
+};
+
+class ScopedClassInfo {
+ public:
+ ScopedClassInfo(jvmtiEnv* jvmtienv, jclass c) : jvmtienv_(jvmtienv), class_(c) {}
+
+ ~ScopedClassInfo() {
+ if (class_ != nullptr) {
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+ }
+ }
+
+ bool Init(bool get_generic = true) {
+ if (class_ == nullptr) {
+ name_ = const_cast<char*>("<NONE>");
+ generic_ = const_cast<char*>("<NONE>");
+ return true;
+ } else {
+ jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
+ jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
+ char** gen_ptr = &generic_;
+ if (!get_generic) {
+ generic_ = nullptr;
+ gen_ptr = nullptr;
+ }
+ return jvmtienv_->GetClassSignature(class_, &name_, gen_ptr) == JVMTI_ERROR_NONE &&
+ ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret1 != JVMTI_ERROR_INVALID_CLASS &&
+ ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+ ret2 != JVMTI_ERROR_INVALID_CLASS;
+ }
+ }
+
+ jclass GetClass() const {
+ return class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ const char* GetSourceDebugExtension() const {
+ if (debug_ext_ == nullptr) {
+ return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+ } else {
+ return debug_ext_;
+ }
+ }
+ const char* GetSourceFileName() const {
+ if (file_ == nullptr) {
+ return "<UNKNOWN_FILE>";
+ } else {
+ return file_;
+ }
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ jclass class_;
+ char* name_ = nullptr;
+ char* generic_ = nullptr;
+ char* file_ = nullptr;
+ char* debug_ext_ = nullptr;
+
+ friend std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& m);
+};
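ScopedThreadInfo and ScopedClassInfo (and ScopedMethodInfo below) follow the RAII pattern for JVMTI-owned strings: query up front, Deallocate exactly once in the destructor, and fall back to a placeholder when the query fails. A stripped-down sketch of the same pattern with assumed names (ScopedName, FakeGetName) and malloc/free standing in for the jvmtiEnv allocator:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for a C API that returns a heap string the caller must release, the way
// jvmtiEnv::GetThreadInfo or GetClassSignature return Deallocate()-able names.
static char* FakeGetName() {
  static const char kName[] = "worker-1";
  char* out = static_cast<char*>(std::malloc(sizeof(kName)));
  std::memcpy(out, kName, sizeof(kName));
  return out;
}

class ScopedName {
 public:
  ScopedName() : name_(FakeGetName()) {}
  ~ScopedName() { std::free(name_); }            // mirrors jvmtienv_->Deallocate(...)
  ScopedName(const ScopedName&) = delete;        // owning type: copying would double-free
  ScopedName& operator=(const ScopedName&) = delete;

  const char* Get() const { return name_ != nullptr ? name_ : "<UNKNOWN>"; }

 private:
  char* name_;
};

int main() {
  ScopedName name;
  std::printf("%s\n", name.Get());  // the buffer is released when `name` goes out of scope
  return 0;
}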
+
+class ScopedMethodInfo {
+ public:
+ ScopedMethodInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jmethodID m)
+ : jvmtienv_(jvmtienv), env_(env), method_(m) {}
+
+ ~ScopedMethodInfo() {
+ DeleteLocalRef(env_, declaring_class_);
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+ }
+
+ bool Init(bool get_generic = true) {
+ if (jvmtienv_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+ return false;
+ }
+ class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+ jint nlines;
+ jvmtiLineNumberEntry* lines;
+ jvmtiError err = jvmtienv_->GetLineNumberTable(method_, &nlines, &lines);
+ if (err == JVMTI_ERROR_NONE) {
+ if (nlines > 0) {
+ first_line_ = lines[0].line_number;
+ }
+ jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+ } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+ err != JVMTI_ERROR_NATIVE_METHOD) {
+ return false;
+ }
+ return class_info_->Init(get_generic) &&
+ (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ }
+
+ const ScopedClassInfo& GetDeclaringClassInfo() const {
+ return *class_info_;
+ }
+
+ jclass GetDeclaringClass() const {
+ return declaring_class_;
+ }
+
+ const char* GetName() const {
+ return name_;
+ }
+
+ const char* GetSignature() const {
+ return signature_;
+ }
+
+ const char* GetGeneric() const {
+ return generic_;
+ }
+
+ jint GetFirstLine() const {
+ return first_line_;
+ }
+
+ private:
+ jvmtiEnv* jvmtienv_;
+ JNIEnv* env_;
+ jmethodID method_;
+ jclass declaring_class_ = nullptr;
+ std::unique_ptr<ScopedClassInfo> class_info_;
+ char* name_ = nullptr;
+ char* signature_ = nullptr;
+ char* generic_ = nullptr;
+ jint first_line_ = -1;
+
+ friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
+};
+
+std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& c) {
+ const char* generic = c.GetGeneric();
+ if (generic != nullptr) {
+ return os << c.GetName() << "<" << generic << ">" << " file: " << c.GetSourceFileName();
+ } else {
+ return os << c.GetName() << " file: " << c.GetSourceFileName();
+ }
+}
+
+std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m) {
+ return os << m.GetDeclaringClassInfo().GetName() << "->" << m.GetName() << m.GetSignature()
+ << " (source: " << m.GetDeclaringClassInfo().GetSourceFileName() << ":"
+ << m.GetFirstLine() << ")";
+}
+
+
+class LogPrinter {
+ public:
+ explicit LogPrinter(jvmtiEvent event) : event_(event) {}
+
+ template <typename ...Args> void PrintRestNoJNI(jvmtiEnv* jvmti, Args... args) {
+ PrintRest(jvmti, static_cast<JNIEnv*>(nullptr), args...);
+ }
+
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti, JNIEnv* env, Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jlongContainer l,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jthreadContainer thr,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jboolean i,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jint i,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jclass klass,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jmethodID meth,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jlocation loc,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jint* ip,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ const void* loc,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ void* loc,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ void** loc,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ unsigned char** v,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ const unsigned char* v,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ const char* v,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ const jvmtiAddrLocationMap* v,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jvalue v,
+ Args... args);
+ template <typename ...Args> void PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* env,
+ jobject v,
+ Args... args);
+
+ std::string GetResult() {
+ std::string out_str = stream.str();
+ return start_args + out_str;
+ }
+
+ private:
+ jvmtiEvent event_;
+ std::string start_args;
+ std::ostringstream stream;
+};
+
+// Base case
+template<> void LogPrinter::PrintRest(jvmtiEnv* jvmti ATTRIBUTE_UNUSED, JNIEnv* jni) {
+ if (jni == nullptr) {
+ start_args = "jvmtiEnv*";
+ } else {
+ start_args = "jvmtiEnv*, JNIEnv*";
+ }
+}
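The PrintRest overload set is the usual recursive variadic-template formatter: each overload renders the first argument after the (jvmtiEnv*, JNIEnv*) prefix and recurses on the remaining pack until the two-argument base case above is reached. A self-contained sketch of the same pattern with assumed names (Format, FormatRest), independent of the JVMTI types:

#include <iostream>
#include <sstream>
#include <string>

// Base case: nothing left to print.
static void FormatRest(std::ostringstream&) {}

// Recursive case: print the head of the pack, then recurse on the tail.
template <typename T, typename... Rest>
static void FormatRest(std::ostringstream& os, T first, Rest... rest) {
  os << ", " << first;
  FormatRest(os, rest...);
}

template <typename... Args>
static std::string Format(Args... args) {
  std::ostringstream os;
  FormatRest(os, args...);
  return os.str();
}

int main() {
  std::cout << Format(1, "two", 3.0) << std::endl;  // prints: , 1, two, 3
  return 0;
}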
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti,
+ JNIEnv* jni,
+ const jvmtiAddrLocationMap* v,
+ Args... args) {
+ if (v != nullptr) {
+ stream << ", const jvmtiAddrLocationMap*[start_address: "
+ << v->start_address << ", location: " << v->location << "]";
+ } else {
+ stream << ", const jvmtiAddrLocationMap*[nullptr]";
+ }
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jint* v, Args... args) {
+ stream << ", jint*[" << static_cast<const void*>(v) << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const void* v, Args... args) {
+ stream << ", const void*[" << v << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, unsigned char** v, Args... args) {
+ stream << ", unsigned char**[" << static_cast<const void*>(v) << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const unsigned char* v, Args... args) {
+ stream << ", const unsigned char*[" << static_cast<const void*>(v) << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, const char* v, Args... args) {
+ stream << ", const char*[" << v << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jvalue v ATTRIBUTE_UNUSED, Args... args) {
+ stream << ", jvalue[<UNION>]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, void** v, Args... args) {
+ stream << ", void**[" << v << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, void* v, Args... args) {
+ stream << ", void*[" << v << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jlongContainer l, Args... args) {
+ stream << ", jlong[" << l.val << ", hex: 0x" << std::hex << l.val << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jlocation l, Args... args) {
+ stream << ", jlocation[" << l << ", hex: 0x" << std::hex << l << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jboolean b, Args... args) {
+ stream << ", jboolean[" << (b ? "true" : "false") << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jint i, Args... args) {
+ stream << ", jint[" << i << ", hex: 0x" << std::hex << i << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jobject obj, Args... args) {
+ if (obj == nullptr) {
+ stream << ", jobject[nullptr]";
+ } else {
+ jni->PushLocalFrame(1);
+ jclass klass = jni->GetObjectClass(obj);
+ ScopedClassInfo sci(jvmti, klass);
+ if (sci.Init(event_ != JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+ stream << ", jobject[type: " << sci << "]";
+ } else {
+ stream << ", jobject[type: TYPE UNKNOWN]";
+ }
+ jni->PopLocalFrame(nullptr);
+ }
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jthreadContainer thr, Args... args) {
+ ScopedThreadInfo sti(jvmti, jni, thr.thread);
+ stream << ", jthread[" << sti.GetName() << "]";
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, Args... args) {
+ ScopedClassInfo sci(jvmti, klass);
+ if (sci.Init(/*get_generic=*/event_ != JVMTI_EVENT_VM_OBJECT_ALLOC)) {
+ stream << ", jclass[" << sci << "]";
+ } else {
+ stream << ", jclass[TYPE UNKNOWN]";
+ }
+ PrintRest(jvmti, jni, args...);
+}
+
+template<typename ...Args>
+void LogPrinter::PrintRest(jvmtiEnv* jvmti, JNIEnv* jni, jmethodID meth, Args... args) {
+ ScopedMethodInfo smi(jvmti, jni, meth);
+ if (smi.Init()) {
+ stream << ", jmethodID[" << smi << "]";
+ } else {
+ stream << ", jmethodID[METHOD UNKNOWN]";
+ }
+ PrintRest(jvmti, jni, args...);
+}
+
+#define GENERATE_LOG_FUNCTION_JNI(name, event, args, argnames) \
+ static void JNICALL log ## name args { \
+ LogPrinter printer(event); \
+ printer.PrintRest argnames; \
+ LOG(INFO) << "Got event " << #name << "(" << printer.GetResult() << ")"; \
+ } \
+
+#define GENERATE_LOG_FUNCTION_NO_JNI(name, event, args, argnames) \
+ static void JNICALL log ## name args { \
+ LogPrinter printer(event); \
+ printer.PrintRestNoJNI argnames; \
+ LOG(INFO) << "Got event " << #name << "(" << printer.GetResult() << ")"; \
+ } \
+
+FOR_ALL_SUPPORTED_JNI_EVENTS(GENERATE_LOG_FUNCTION_JNI)
+FOR_ALL_SUPPORTED_NO_JNI_EVENTS(GENERATE_LOG_FUNCTION_NO_JNI)
#undef GENERATE_LOG_FUNCTION
static jvmtiEventCallbacks kLogCallbacks {
-#define CREATE_LOG_EVENT_CALLBACK(name, num, args) \
+#define CREATE_LOG_EVENT_CALLBACK(name, num, args, argnames) \
.name = log ## name,
FOR_ALL_SUPPORTED_EVENTS(CREATE_LOG_EVENT_CALLBACK)
#undef CREATE_LOG_EVENT_CALLBACK
};
+static std::string EventToName(jvmtiEvent desired_event) {
+#define CHECK_NAME(name, event, args, argnames) \
+ if (desired_event == event) { \
+ return #name; \
+ }
+ FOR_ALL_SUPPORTED_EVENTS(CHECK_NAME);
+ LOG(FATAL) << "Unknown event " << desired_event;
+ __builtin_unreachable();
+#undef CHECK_NAME
+}
static jvmtiEvent NameToEvent(const std::string& desired_name) {
-#define CHECK_NAME(name, event, args) \
+#define CHECK_NAME(name, event, args, argnames) \
if (desired_name == #name) { \
return event; \
}
@@ -125,14 +593,46 @@
#undef CHECK_NAME
}
+#undef FOR_ALL_SUPPORTED_JNI_EVENTS
+#undef FOR_ALL_SUPPORTED_NO_JNI_EVENTS
#undef FOR_ALL_SUPPORTED_EVENTS
-static std::vector<jvmtiEvent> GetRequestedEventList(const std::string& args) {
+
+static std::vector<jvmtiEvent> GetAllAvailableEvents(jvmtiEnv* jvmti) {
+ std::vector<jvmtiEvent> out;
+ jvmtiCapabilities caps{};
+ jvmti->GetPotentialCapabilities(&caps);
+ uint8_t caps_bytes[sizeof(caps)];
+ memcpy(caps_bytes, &caps, sizeof(caps));
+ for (jvmtiEvent e : kAllEvents) {
+ jvmtiCapabilities req{};
+ AddCapsForEvent(e, &req);
+ uint8_t req_bytes[sizeof(req)];
+ memcpy(req_bytes, &req, sizeof(req));
+ bool good = true;
+ for (size_t i = 0; i < sizeof(caps); i++) {
+ if ((req_bytes[i] & caps_bytes[i]) != req_bytes[i]) {
+ good = false;
+ break;
+ }
+ }
+ if (good) {
+ out.push_back(e);
+ } else {
+ LOG(WARNING) << "Unable to get capabilities for event " << EventToName(e);
+ }
+ }
+ return out;
+}
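GetAllAvailableEvents treats the two jvmtiCapabilities values as raw byte arrays and keeps an event only when every bit it requires is also present in the potential capabilities, logging a warning otherwise. A standalone sketch of that subset test with an assumed helper name (IsSubset):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Byte-wise subset test: usable only if (required & available) == required everywhere.
static bool IsSubset(const uint8_t* required, const uint8_t* available, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    if ((required[i] & available[i]) != required[i]) {
      return false;  // at least one required capability bit is missing
    }
  }
  return true;
}

int main() {
  const uint8_t available[] = {0b1011, 0b0001};
  const uint8_t needs_ok[]  = {0b0011, 0b0001};  // subset of available -> usable
  const uint8_t needs_bad[] = {0b0100, 0b0000};  // bit 2 missing -> not usable
  std::printf("%d %d\n", IsSubset(needs_ok, available, 2), IsSubset(needs_bad, available, 2));
  return 0;
}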
+
+static std::vector<jvmtiEvent> GetRequestedEventList(jvmtiEnv* jvmti, const std::string& args) {
std::vector<jvmtiEvent> res;
std::stringstream args_stream(args);
std::string item;
while (std::getline(args_stream, item, ',')) {
if (item == "") {
continue;
+ } else if (item == "all") {
+ return GetAllAvailableEvents(jvmti);
}
res.push_back(NameToEvent(item));
}
@@ -168,12 +668,17 @@
args = args.substr(3);
}
- std::vector<jvmtiEvent> events = GetRequestedEventList(args);
+ std::vector<jvmtiEvent> events = GetRequestedEventList(jvmti, args);
jvmtiCapabilities caps{};
for (jvmtiEvent e : events) {
AddCapsForEvent(e, &caps);
}
+ if (is_log) {
+ caps.can_get_line_numbers = 1;
+ caps.can_get_source_file_name = 1;
+ caps.can_get_source_debug_extension = 1;
+ }
error = jvmti->AddCapabilities(&caps);
if (error != JVMTI_ERROR_NONE) {
LOG(ERROR) << "Unable to set caps";
diff --git a/tools/titrace/instruction_decoder.cc b/tools/titrace/instruction_decoder.cc
index d8fb713..7f8b296 100644
--- a/tools/titrace/instruction_decoder.cc
+++ b/tools/titrace/instruction_decoder.cc
@@ -32,7 +32,7 @@
return Bytecode::ToString(op);
}
- virtual size_t LocationToOffset(size_t j_location) {
+ size_t LocationToOffset(size_t j_location) override {
return j_location;
}
@@ -474,7 +474,7 @@
return Bytecode::ToString(op);
}
- virtual size_t LocationToOffset(size_t j_location) {
+ size_t LocationToOffset(size_t j_location) override {
// dex pc is uint16_t*, but offset needs to be in bytes.
return j_location * (sizeof(uint16_t) / sizeof(uint8_t));
}
diff --git a/tools/titrace/titrace.cc b/tools/titrace/titrace.cc
index 981ad56..1e49c0b 100644
--- a/tools/titrace/titrace.cc
+++ b/tools/titrace/titrace.cc
@@ -54,7 +54,7 @@
}
TiMemory(const TiMemory& other) = delete;
- TiMemory(TiMemory&& other) {
+ TiMemory(TiMemory&& other) noexcept {
env_ = other.env_;
mem_ = other.mem_;
size_ = other.size_;
@@ -66,7 +66,7 @@
}
}
- TiMemory& operator=(TiMemory&& other) {
+ TiMemory& operator=(TiMemory&& other) noexcept {
if (mem_ != other.mem_) {
TiMemory::~TiMemory();
}
@@ -237,7 +237,7 @@
void* /* reserved */) {
using namespace titrace; // NOLINT [build/namespaces] [5]
- android::base::InitLogging(/* argv */nullptr);
+ android::base::InitLogging(/* argv= */nullptr);
jvmtiEnv* jvmti = nullptr;
{
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index 4ea5b2d..98f7ea5 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -111,13 +111,6 @@
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
- void InvokeVirtualOrInterface(art::Thread* thread ATTRIBUTE_UNUSED,
- art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
- art::ArtMethod* caller ATTRIBUTE_UNUSED,
- uint32_t dex_pc ATTRIBUTE_UNUSED,
- art::ArtMethod* callee ATTRIBUTE_UNUSED)
- override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
-
void WatchedFramePop(art::Thread* thread ATTRIBUTE_UNUSED,
const art::ShadowFrame& frame ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index 96d4a09..92ace03 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -14,6 +14,7 @@
cc_binary {
name: "veridex",
+ defaults: ["art_defaults"],
host_supported: true,
srcs: [
"flow_analysis.cc",
diff --git a/tools/veridex/Android.mk b/tools/veridex/Android.mk
index 2faa577..c0b5ca1 100644
--- a/tools/veridex/Android.mk
+++ b/tools/veridex/Android.mk
@@ -31,6 +31,7 @@
$(transform-classes.jar-to-dex)
app_compat_lists := \
+ $(INTERNAL_PLATFORM_HIDDENAPI_WHITELIST) \
$(INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST) \
$(INTERNAL_PLATFORM_HIDDENAPI_DARK_GREYLIST) \
$(INTERNAL_PLATFORM_HIDDENAPI_BLACKLIST)
@@ -46,22 +47,30 @@
$(VERIDEX_FILES_PATH): PRIVATE_VERIDEX_FILES := $(VERIDEX_FILES)
$(VERIDEX_FILES_PATH): PRIVATE_APP_COMPAT_LISTS := $(app_compat_lists)
+$(VERIDEX_FILES_PATH): PRIVATE_SYSTEM_STUBS_DEX_DIR := $(dir $(system_stub_dex))
$(VERIDEX_FILES_PATH): PRIVATE_SYSTEM_STUBS_ZIP := $(dir $(VERIDEX_FILES_PATH))/system-stubs.zip
+$(VERIDEX_FILES_PATH): PRIVATE_OAHL_STUBS_DEX_DIR := $(dir $(oahl_stub_dex))
$(VERIDEX_FILES_PATH): PRIVATE_OAHL_STUBS_ZIP := $(dir $(VERIDEX_FILES_PATH))/org.apache.http.legacy-stubs.zip
$(VERIDEX_FILES_PATH) : $(SOONG_ZIP) $(VERIDEX_FILES) $(app_compat_lists) $(HOST_OUT_EXECUTABLES)/veridex $(system_stub_dex) $(oahl_stub_dex)
- $(hide) rm -f $(PRIVATE_SYSTEM_STUBS_ZIP) $(PRIVATE_OAHL_STUBS_ZIP)
- $(hide) zip -j $(PRIVATE_SYSTEM_STUBS_ZIP) $(dir $(system_stub_dex))/classes*.dex
- $(hide) zip -j $(PRIVATE_OAHL_STUBS_ZIP) $(dir $(oahl_stub_dex))/classes*.dex
- $(hide) $(SOONG_ZIP) -o $@ -C art/tools/veridex -f $(PRIVATE_VERIDEX_FILES) \
- -C $(dir $(lastword $(PRIVATE_APP_COMPAT_LISTS))) $(addprefix -f , $(PRIVATE_APP_COMPAT_LISTS)) \
- -C $(HOST_OUT_EXECUTABLES) -f $(HOST_OUT_EXECUTABLES)/veridex \
- -C $(dir $(PRIVATE_SYSTEM_STUBS_ZIP)) -f $(PRIVATE_SYSTEM_STUBS_ZIP) \
- -C $(dir $(PRIVATE_OAHL_STUBS_ZIP)) -f $(PRIVATE_OAHL_STUBS_ZIP)
- $(hide) rm -f $(PRIVATE_SYSTEM_STUBS_ZIP)
- $(hide) rm -f $(PRIVATE_OAHL_STUBS_ZIP)
+ rm -rf $(dir $@)/*
+ ls -1 $(PRIVATE_SYSTEM_STUBS_DEX_DIR)/classes*.dex | sort >$(PRIVATE_SYSTEM_STUBS_ZIP).list
+ $(SOONG_ZIP) -o $(PRIVATE_SYSTEM_STUBS_ZIP) -C $(PRIVATE_SYSTEM_STUBS_DEX_DIR) -l $(PRIVATE_SYSTEM_STUBS_ZIP).list
+ rm $(PRIVATE_SYSTEM_STUBS_ZIP).list
+ ls -1 $(PRIVATE_OAHL_STUBS_DEX_DIR)/classes*.dex | sort >$(PRIVATE_OAHL_STUBS_ZIP).list
+ $(SOONG_ZIP) -o $(PRIVATE_OAHL_STUBS_ZIP) -C $(PRIVATE_OAHL_STUBS_DEX_DIR) -l $(PRIVATE_OAHL_STUBS_ZIP).list
+ rm $(PRIVATE_OAHL_STUBS_ZIP).list
+ $(SOONG_ZIP) -o $@ -C art/tools/veridex -f $(PRIVATE_VERIDEX_FILES) \
+ -C $(dir $(lastword $(PRIVATE_APP_COMPAT_LISTS))) $(addprefix -f , $(PRIVATE_APP_COMPAT_LISTS)) \
+ -C $(HOST_OUT_EXECUTABLES) -f $(HOST_OUT_EXECUTABLES)/veridex \
+ -C $(dir $(PRIVATE_SYSTEM_STUBS_ZIP)) -f $(PRIVATE_SYSTEM_STUBS_ZIP) \
+ -C $(dir $(PRIVATE_OAHL_STUBS_ZIP)) -f $(PRIVATE_OAHL_STUBS_ZIP)
+ rm -f $(PRIVATE_SYSTEM_STUBS_ZIP)
+ rm -f $(PRIVATE_OAHL_STUBS_ZIP)
# Make the zip file available for prebuilts.
$(call dist-for-goals,sdk,$(VERIDEX_FILES_PATH))
VERIDEX_FILES :=
app_compat_lists :=
+system_stub_dex :=
+oahl_stub_dex :=
diff --git a/tools/veridex/appcompat.sh b/tools/veridex/appcompat.sh
index e7b735d..f57c8a4 100755
--- a/tools/veridex/appcompat.sh
+++ b/tools/veridex/appcompat.sh
@@ -22,6 +22,7 @@
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [[ -e ${SCRIPT_DIR}/veridex && \
+ -e ${SCRIPT_DIR}/hiddenapi-whitelist.txt && \
-e ${SCRIPT_DIR}/hiddenapi-blacklist.txt && \
-e ${SCRIPT_DIR}/hiddenapi-light-greylist.txt && \
-e ${SCRIPT_DIR}/hiddenapi-dark-greylist.txt && \
@@ -29,6 +30,7 @@
-e ${SCRIPT_DIR}/system-stubs.zip ]]; then
exec ${SCRIPT_DIR}/veridex \
--core-stubs=${SCRIPT_DIR}/system-stubs.zip:${SCRIPT_DIR}/org.apache.http.legacy-stubs.zip \
+ --whitelist=${SCRIPT_DIR}/hiddenapi-whitelist.txt \
--blacklist=${SCRIPT_DIR}/hiddenapi-blacklist.txt \
--light-greylist=${SCRIPT_DIR}/hiddenapi-light-greylist.txt \
--dark-greylist=${SCRIPT_DIR}/hiddenapi-dark-greylist.txt \
@@ -43,8 +45,8 @@
fi
# Logic for setting out_dir from build/make/core/envsetup.mk:
-if [[ -z $OUT_DIR ]]; then
- if [[ -z $OUT_DIR_COMMON_BASE ]]; then
+if [[ -z "${OUT_DIR}" ]]; then
+ if [[ -z "${OUT_DIR_COMMON_BASE}" ]]; then
OUT=out
else
OUT=${OUT_DIR_COMMON_BASE}/${PWD##*/}
@@ -53,15 +55,18 @@
OUT=${OUT_DIR}
fi
-PACKAGING=${OUT}/target/common/obj/PACKAGING
+if [[ -z "${PACKAGING}" ]]; then
+ PACKAGING=${OUT}/target/common/obj/PACKAGING
+fi
-if [ -z "$ANDROID_HOST_OUT" ] ; then
+if [[ -z "${ANDROID_HOST_OUT}" ]]; then
ANDROID_HOST_OUT=${OUT}/host/linux-x86
fi
${ANDROID_HOST_OUT}/bin/veridex \
--core-stubs=${PACKAGING}/core_dex_intermediates/classes.dex:${PACKAGING}/oahl_dex_intermediates/classes.dex \
+ --whitelist=${PACKAGING}/hiddenapi-whitelist.txt \
--blacklist=${PACKAGING}/hiddenapi-blacklist.txt \
--light-greylist=${PACKAGING}/hiddenapi-light-greylist.txt \
--dark-greylist=${PACKAGING}/hiddenapi-dark-greylist.txt \
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index f5eb4ea..e925e1d 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -318,7 +318,7 @@
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_VIRTUAL: {
- last_result_ = AnalyzeInvoke(instruction, /* is_range */ false);
+ last_result_ = AnalyzeInvoke(instruction, /* is_range= */ false);
break;
}
@@ -327,7 +327,7 @@
case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_SUPER_RANGE:
case Instruction::INVOKE_VIRTUAL_RANGE: {
- last_result_ = AnalyzeInvoke(instruction, /* is_range */ true);
+ last_result_ = AnalyzeInvoke(instruction, /* is_range= */ true);
break;
}
@@ -495,7 +495,7 @@
case Instruction::DIV_INT_LIT8:
case Instruction::REM_INT_LIT8:
case Instruction::SHL_INT_LIT8:
- case Instruction::SHR_INT_LIT8: {
+ case Instruction::SHR_INT_LIT8:
case Instruction::USHR_INT_LIT8: {
UpdateRegister(instruction.VRegA(), VeriClass::integer_);
break;
@@ -537,7 +537,7 @@
case Instruction::CMPG_FLOAT:
case Instruction::CMPG_DOUBLE:
case Instruction::CMPL_FLOAT:
- case Instruction::CMPL_DOUBLE:
+ case Instruction::CMPL_DOUBLE: {
UpdateRegister(instruction.VRegA(), VeriClass::integer_);
break;
}
@@ -702,14 +702,14 @@
// second parameter for the field name.
RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
- uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ false));
+ uses_.push_back(ReflectAccessInfo(cls, name, /* is_method= */ false));
return GetReturnType(id);
} else if (IsGetMethod(method)) {
// Class.getMethod or Class.getDeclaredMethod. Fetch the first parameter for the class, and the
// second parameter for the field name.
RegisterValue cls = GetRegister(GetParameterAt(instruction, is_range, args, 0));
RegisterValue name = GetRegister(GetParameterAt(instruction, is_range, args, 1));
- uses_.push_back(ReflectAccessInfo(cls, name, /* is_method */ true));
+ uses_.push_back(ReflectAccessInfo(cls, name, /* is_method= */ true));
return GetReturnType(id);
} else if (method == VeriClass::getClass_) {
// Get the type of the first parameter.
diff --git a/tools/veridex/flow_analysis.h b/tools/veridex/flow_analysis.h
index 865b9df..2151a41 100644
--- a/tools/veridex/flow_analysis.h
+++ b/tools/veridex/flow_analysis.h
@@ -174,7 +174,8 @@
RegisterValue name;
bool is_method;
- ReflectAccessInfo(RegisterValue c, RegisterValue n, bool m) : cls(c), name(n), is_method(m) {}
+ ReflectAccessInfo(RegisterValue c, RegisterValue n, bool is_method)
+ : cls(c), name(n), is_method(is_method) {}
bool IsConcrete() const {
// We capture RegisterSource::kString for the class, for example in Class.forName.
diff --git a/tools/veridex/hidden_api.h b/tools/veridex/hidden_api.h
index b1c8559..68485bd 100644
--- a/tools/veridex/hidden_api.h
+++ b/tools/veridex/hidden_api.h
@@ -33,10 +33,14 @@
*/
class HiddenApi {
public:
- HiddenApi(const char* blacklist, const char* dark_greylist, const char* light_greylist) {
+ HiddenApi(const char* whitelist,
+ const char* blacklist,
+ const char* dark_greylist,
+ const char* light_greylist) {
FillList(light_greylist, light_greylist_);
FillList(dark_greylist, dark_greylist_);
FillList(blacklist, blacklist_);
+ FillList(whitelist, whitelist_);
}
HiddenApiAccessFlags::ApiList GetApiList(const std::string& name) const {
@@ -46,13 +50,15 @@
return HiddenApiAccessFlags::kDarkGreylist;
} else if (IsInList(name, light_greylist_)) {
return HiddenApiAccessFlags::kLightGreylist;
- } else {
+ } else if (IsInList(name, whitelist_)) {
return HiddenApiAccessFlags::kWhitelist;
+ } else {
+ return HiddenApiAccessFlags::kNoList;
}
}
- bool IsInRestrictionList(const std::string& name) const {
- return GetApiList(name) != HiddenApiAccessFlags::kWhitelist;
+ bool IsInAnyList(const std::string& name) const {
+ return GetApiList(name) != HiddenApiAccessFlags::kNoList;
}
static std::string GetApiMethodName(const DexFile& dex_file, uint32_t method_index);
@@ -76,6 +82,7 @@
static void FillList(const char* filename, std::set<std::string>& entries);
+ std::set<std::string> whitelist_;
std::set<std::string> blacklist_;
std::set<std::string> light_greylist_;
std::set<std::string> dark_greylist_;
@@ -85,7 +92,7 @@
uint32_t count = 0;
uint32_t reflection_count = 0;
uint32_t linking_count = 0;
- uint32_t api_counts[4] = { 0, 0, 0, 0 };
+ uint32_t api_counts[5] = { 0, 0, 0, 0, 0 };
};
} // namespace art
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index 4eba10e..a8c53b3 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -35,7 +35,7 @@
// Note: we always query whether a method is in a list, as the app
// might define blacklisted APIs (which won't be used at runtime).
std::string name = HiddenApi::GetApiMethodName(resolver->GetDexFile(), method_id);
- if (hidden_api_.IsInRestrictionList(name)) {
+ if (hidden_api_.IsInAnyList(name)) {
method_locations_[name].push_back(ref);
}
}
@@ -46,7 +46,7 @@
// Note: we always query whether a field is in a list, as the app
// might define blacklisted APIs (which won't be used at runtime).
std::string name = HiddenApi::GetApiFieldName(resolver->GetDexFile(), field_id);
- if (hidden_api_.IsInRestrictionList(name)) {
+ if (hidden_api_.IsInAnyList(name)) {
field_locations_[name].push_back(ref);
}
}
@@ -57,7 +57,7 @@
// types can lead to being used through reflection.
for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
std::string name(dex_file.StringByTypeIdx(dex::TypeIndex(i)));
- if (hidden_api_.IsInRestrictionList(name)) {
+ if (hidden_api_.IsInAnyList(name)) {
classes_.insert(name);
}
}
@@ -81,9 +81,9 @@
// private methods and fields in them.
// We don't add class names to the `strings_` set as we know method/field names
// don't have '.' or '/'. All hidden API class names have a '/'.
- if (hidden_api_.IsInRestrictionList(str)) {
+ if (hidden_api_.IsInAnyList(str)) {
classes_.insert(str);
- } else if (hidden_api_.IsInRestrictionList(name)) {
+ } else if (hidden_api_.IsInAnyList(name)) {
// Could be something passed to JNI.
classes_.insert(name);
} else {
@@ -174,30 +174,27 @@
void HiddenApiFinder::Dump(std::ostream& os,
HiddenApiStats* stats,
bool dump_reflection) {
- static const char* kPrefix = " ";
stats->linking_count = method_locations_.size() + field_locations_.size();
// Dump methods from hidden APIs linked against.
- for (const std::pair<std::string, std::vector<MethodReference>>& pair : method_locations_) {
+ for (const std::pair<const std::string,
+ std::vector<MethodReference>>& pair : method_locations_) {
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(pair.first);
stats->api_counts[api_list]++;
os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
os << std::endl;
- for (const MethodReference& ref : pair.second) {
- os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
- }
+ HiddenApiFinder::DumpReferences(os, pair.second);
os << std::endl;
}
// Dump fields from hidden APIs linked against.
- for (const std::pair<std::string, std::vector<MethodReference>>& pair : field_locations_) {
+ for (const std::pair<const std::string,
+ std::vector<MethodReference>>& pair : field_locations_) {
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(pair.first);
stats->api_counts[api_list]++;
os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
os << std::endl;
- for (const MethodReference& ref : pair.second) {
- os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
- }
+ HiddenApiFinder::DumpReferences(os, pair.second);
os << std::endl;
}
@@ -208,14 +205,12 @@
std::string full_name = cls + "->" + name;
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
stats->api_counts[api_list]++;
- if (api_list != HiddenApiAccessFlags::kWhitelist) {
+ if (api_list != HiddenApiAccessFlags::kNoList) {
stats->reflection_count++;
os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name
<< " potential use(s):";
os << std::endl;
- for (const MethodReference& ref : reflection_locations_[name]) {
- os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
- }
+ HiddenApiFinder::DumpReferences(os, reflection_locations_[name]);
os << std::endl;
}
}
@@ -223,4 +218,27 @@
}
}
+void HiddenApiFinder::DumpReferences(std::ostream& os,
+ const std::vector<MethodReference>& references) {
+ static const char* kPrefix = " ";
+
+ // Count number of occurrences of each reference, to make the output clearer.
+ std::map<std::string, size_t> counts;
+ for (const MethodReference& ref : references) {
+ std::string ref_string = HiddenApi::GetApiMethodName(ref);
+ if (!counts.count(ref_string)) {
+ counts[ref_string] = 0;
+ }
+ counts[ref_string]++;
+ }
+
+ for (const std::pair<const std::string, size_t>& pair : counts) {
+ os << kPrefix << pair.first;
+ if (pair.second > 1) {
+ os << " (" << pair.second << " occurrences)";
+ }
+ os << std::endl;
+ }
+}
+
} // namespace art
diff --git a/tools/veridex/hidden_api_finder.h b/tools/veridex/hidden_api_finder.h
index f7d3dc8..9e10c1a 100644
--- a/tools/veridex/hidden_api_finder.h
+++ b/tools/veridex/hidden_api_finder.h
@@ -47,6 +47,7 @@
void CollectAccesses(VeridexResolver* resolver);
void CheckMethod(uint32_t method_idx, VeridexResolver* resolver, MethodReference ref);
void CheckField(uint32_t field_idx, VeridexResolver* resolver, MethodReference ref);
+ void DumpReferences(std::ostream& os, const std::vector<MethodReference>& references);
const HiddenApi& hidden_api_;
std::set<std::string> classes_;
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 445221e..9e02cbf 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -85,20 +85,20 @@
void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
static const char* kPrefix = " ";
std::map<std::string, std::vector<MethodReference>> named_uses;
- for (auto it : concrete_uses_) {
+ for (auto& it : concrete_uses_) {
MethodReference ref = it.first;
for (const ReflectAccessInfo& info : it.second) {
std::string cls(info.cls.ToString());
std::string name(info.name.ToString());
std::string full_name = cls + "->" + name;
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
- if (api_list != HiddenApiAccessFlags::kWhitelist) {
+ if (api_list != HiddenApiAccessFlags::kNoList) {
named_uses[full_name].push_back(ref);
}
}
}
- for (auto it : named_uses) {
+ for (auto& it : named_uses) {
++stats->reflection_count;
const std::string& full_name = it.first;
HiddenApiAccessFlags::ApiList api_list = hidden_api_.GetApiList(full_name);
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 1d3a4fb..7206c7d 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -65,14 +65,26 @@
VeriMethod VeriClass::loadClass_ = nullptr;
VeriField VeriClass::sdkInt_ = nullptr;
+static const char* kDexFileOption = "--dex-file=";
+static const char* kStubsOption = "--core-stubs=";
+static const char* kWhitelistOption = "--whitelist=";
+static const char* kBlacklistOption = "--blacklist=";
+static const char* kDarkGreylistOption = "--dark-greylist=";
+static const char* kLightGreylistOption = "--light-greylist=";
+static const char* kImprecise = "--imprecise";
+static const char* kTargetSdkVersion = "--target-sdk-version=";
+static const char* kOnlyReportSdkUses = "--only-report-sdk-uses";
+
struct VeridexOptions {
const char* dex_file = nullptr;
const char* core_stubs = nullptr;
+ const char* whitelist = nullptr;
const char* blacklist = nullptr;
const char* light_greylist = nullptr;
const char* dark_greylist = nullptr;
bool precise = true;
int target_sdk_version = 28; /* P */
+ bool only_report_sdk_uses = false;
};
static const char* Substr(const char* str, int index) {
@@ -88,19 +100,13 @@
argv++;
argc--;
- static const char* kDexFileOption = "--dex-file=";
- static const char* kStubsOption = "--core-stubs=";
- static const char* kBlacklistOption = "--blacklist=";
- static const char* kDarkGreylistOption = "--dark-greylist=";
- static const char* kLightGreylistOption = "--light-greylist=";
- static const char* kImprecise = "--imprecise";
- static const char* kTargetSdkVersion = "--target-sdk-version=";
-
for (int i = 0; i < argc; ++i) {
if (StartsWith(argv[i], kDexFileOption)) {
options->dex_file = Substr(argv[i], strlen(kDexFileOption));
} else if (StartsWith(argv[i], kStubsOption)) {
options->core_stubs = Substr(argv[i], strlen(kStubsOption));
+ } else if (StartsWith(argv[i], kWhitelistOption)) {
+ options->whitelist = Substr(argv[i], strlen(kWhitelistOption));
} else if (StartsWith(argv[i], kBlacklistOption)) {
options->blacklist = Substr(argv[i], strlen(kBlacklistOption));
} else if (StartsWith(argv[i], kDarkGreylistOption)) {
@@ -111,6 +117,8 @@
options->precise = false;
} else if (StartsWith(argv[i], kTargetSdkVersion)) {
options->target_sdk_version = atoi(Substr(argv[i], strlen(kTargetSdkVersion)));
+ } else if (strcmp(argv[i], kOnlyReportSdkUses) == 0) {
+ options->only_report_sdk_uses = true;
}
}
}
@@ -130,6 +138,12 @@
static int Run(int argc, char** argv) {
VeridexOptions options;
ParseArgs(&options, argc, argv);
+
+ if (!options.dex_file) {
+ LOG(ERROR) << "Required argument '" << kDexFileOption << "' not provided.";
+ return 1;
+ }
+
gTargetSdkVersion = options.target_sdk_version;
std::vector<std::string> boot_content;
@@ -215,8 +229,20 @@
std::vector<std::unique_ptr<VeridexResolver>> app_resolvers;
Resolve(app_dex_files, resolver_map, type_map, &app_resolvers);
+ if (options.only_report_sdk_uses) {
+ // If we only need to report SDK uses, clear out any of the other lists so that
+ // the analysis doesn't report them.
+ options.blacklist = nullptr;
+ options.dark_greylist = nullptr;
+ options.light_greylist = nullptr;
+ } else {
+ // Otherwise, omit SDK uses.
+ options.whitelist = nullptr;
+ }
+
// Find and log uses of hidden APIs.
- HiddenApi hidden_api(options.blacklist, options.dark_greylist, options.light_greylist);
+ HiddenApi hidden_api(
+ options.whitelist, options.blacklist, options.dark_greylist, options.light_greylist);
HiddenApiStats stats;
HiddenApiFinder api_finder(hidden_api);
@@ -229,7 +255,7 @@
precise_api_finder.Dump(std::cout, &stats);
}
- DumpSummaryStats(std::cout, stats);
+ DumpSummaryStats(std::cout, stats, options);
if (options.precise) {
std::cout << "To run an analysis that can give more reflection accesses, " << std::endl
@@ -240,17 +266,23 @@
}
private:
- static void DumpSummaryStats(std::ostream& os, const HiddenApiStats& stats) {
+ static void DumpSummaryStats(std::ostream& os,
+ const HiddenApiStats& stats,
+ const VeridexOptions& options) {
static const char* kPrefix = " ";
- os << stats.count << " hidden API(s) used: "
- << stats.linking_count << " linked against, "
- << stats.reflection_count << " through reflection" << std::endl;
- os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kBlacklist]
- << " in blacklist" << std::endl;
- os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kDarkGreylist]
- << " in dark greylist" << std::endl;
- os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kLightGreylist]
- << " in light greylist" << std::endl;
+ if (options.only_report_sdk_uses) {
+ os << stats.api_counts[HiddenApiAccessFlags::kWhitelist] << " SDK API uses." << std::endl;
+ } else {
+ os << stats.count << " hidden API(s) used: "
+ << stats.linking_count << " linked against, "
+ << stats.reflection_count << " through reflection" << std::endl;
+ os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kBlacklist]
+ << " in blacklist" << std::endl;
+ os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kDarkGreylist]
+ << " in dark greylist" << std::endl;
+ os << kPrefix << stats.api_counts[HiddenApiAccessFlags::kLightGreylist]
+ << " in light greylist" << std::endl;
+ }
}
static bool Load(const std::string& filename,
@@ -312,4 +344,3 @@
int main(int argc, char** argv) {
return art::Veridex::Run(argc, argv);
}
-
diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h
index 31ddbf4..e0d8261 100644
--- a/tools/veridex/veridex.h
+++ b/tools/veridex/veridex.h
@@ -44,7 +44,6 @@
*/
class VeriClass {
public:
- VeriClass(const VeriClass& other) = default;
VeriClass() = default;
VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl)
: kind_(k), dimensions_(dims), class_def_(cl) {}