-rw-r--r--  .gitignore | 1
-rw-r--r--  build/Android.gtest.mk | 2
-rw-r--r--  cmdline/cmdline_parser_test.cc | 31
-rw-r--r--  cmdline/cmdline_types.h | 18
-rw-r--r--  compiler/Android.mk | 4
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 9
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h | 1
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 4
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 4
-rw-r--r--  compiler/driver/compiler_driver.cc | 8
-rw-r--r--  compiler/image_writer.cc | 6
-rw-r--r--  compiler/jni/jni_cfi_test_expected.inc | 131
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 3
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc | 19
-rw-r--r--  compiler/oat_writer.cc | 55
-rw-r--r--  compiler/oat_writer.h | 11
-rw-r--r--  compiler/optimizing/builder.cc | 13
-rw-r--r--  compiler/optimizing/code_generator.cc | 54
-rw-r--r--  compiler/optimizing/code_generator.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 191
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 9
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 240
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 163
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 10
-rw-r--r--  compiler/optimizing/constant_area_fixups_x86.cc | 132
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 4
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 2
-rw-r--r--  compiler/optimizing/graph_checker.cc | 38
-rw-r--r--  compiler/optimizing/graph_checker.h | 17
-rw-r--r--  compiler/optimizing/graph_checker_test.cc | 8
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 113
-rw-r--r--  compiler/optimizing/intrinsics.cc | 3
-rw-r--r--  compiler/optimizing/intrinsics.h | 20
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 302
-rw-r--r--  compiler/optimizing/intrinsics_arm.h | 7
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 1
-rw-r--r--  compiler/optimizing/intrinsics_list.h | 1
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 1
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 1
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 296
-rw-r--r--  compiler/optimizing/nodes.cc | 19
-rw-r--r--  compiler/optimizing/nodes.h | 34
-rw-r--r--  compiler/optimizing/nodes_x86.h | 39
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 4
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 9
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc | 14
-rw-r--r--  compiler/utils/arm/assembler_arm32.h | 4
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 13
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 5
-rw-r--r--  compiler/utils/arm/constants_arm.h | 3
-rw-r--r--  compiler/utils/assembler.cc | 7
-rw-r--r--  compiler/utils/assembler.h | 4
-rw-r--r--  compiler/utils/assembler_test.h | 97
-rw-r--r--  compiler/utils/assembler_thumb_test.cc | 3
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc | 241
-rw-r--r--  compiler/utils/label.h | 7
-rw-r--r--  compiler/utils/mips/assembler_mips.cc | 1874
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 512
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 1324
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 24
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 103
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 21
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 69
-rw-r--r--  dex2oat/dex2oat.cc | 2
-rw-r--r--  disassembler/disassembler_mips.cc | 18
-rw-r--r--  oatdump/oatdump.cc | 48
-rw-r--r--  runtime/Android.mk | 2
-rw-r--r--  runtime/arch/arch_test.cc | 2
-rw-r--r--  runtime/arch/arm/context_arm.cc | 4
-rw-r--r--  runtime/arch/arm64/context_arm64.cc | 4
-rw-r--r--  runtime/arch/instruction_set.h | 16
-rw-r--r--  runtime/arch/mips/context_mips.cc | 4
-rw-r--r--  runtime/arch/mips64/context_mips64.cc | 4
-rw-r--r--  runtime/arch/stub_test.cc | 471
-rw-r--r--  runtime/arch/x86/context_x86.cc | 7
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc | 7
-rw-r--r--  runtime/art_code.cc | 333
-rw-r--r--  runtime/art_code.h | 93
-rw-r--r--  runtime/art_method-inl.h | 108
-rw-r--r--  runtime/art_method.cc | 255
-rw-r--r--  runtime/art_method.h | 148
-rw-r--r--  runtime/base/arena_allocator.cc | 1
-rw-r--r--  runtime/base/arena_allocator.h | 1
-rw-r--r--  runtime/base/mutex.cc | 12
-rw-r--r--  runtime/base/mutex.h | 10
-rw-r--r--  runtime/check_reference_map_visitor.h | 7
-rw-r--r--  runtime/class_linker.cc | 1175
-rw-r--r--  runtime/class_linker.h | 123
-rw-r--r--  runtime/common_runtime_test.cc | 3
-rw-r--r--  runtime/dex_file_verifier.cc | 11
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 1
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 8
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 5
-rw-r--r--  runtime/entrypoints/quick/quick_lock_entrypoints.cc | 10
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 20
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 6
-rw-r--r--  runtime/exception_test.cc | 6
-rw-r--r--  runtime/experimental_flags.h | 88
-rw-r--r--  runtime/fault_handler.cc | 10
-rw-r--r--  runtime/gc/space/image_space.cc | 15
-rw-r--r--  runtime/gc/space/image_space.h | 5
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 12
-rw-r--r--  runtime/instrumentation.h | 5
-rw-r--r--  runtime/interpreter/interpreter.cc | 7
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 2
-rw-r--r--  runtime/interpreter/interpreter_common.h | 25
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc | 19
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 21
-rw-r--r--  runtime/jvalue.h | 8
-rw-r--r--  runtime/mem_map.cc | 20
-rw-r--r--  runtime/mem_map.h | 2
-rw-r--r--  runtime/mirror/class-inl.h | 3
-rw-r--r--  runtime/mirror/class.h | 21
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 25
-rw-r--r--  runtime/mirror/object.h | 10
-rw-r--r--  runtime/modifiers.h | 5
-rw-r--r--  runtime/monitor.cc | 6
-rw-r--r--  runtime/monitor.h | 12
-rw-r--r--  runtime/monitor_android.cc | 4
-rw-r--r--  runtime/monitor_linux.cc | 2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 166
-rw-r--r--  runtime/native/dalvik_system_DexFile.h | 4
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 3
-rw-r--r--  runtime/native/java_lang_Class.cc | 4
-rw-r--r--  runtime/oat.cc | 6
-rw-r--r--  runtime/oat_file-inl.h | 16
-rw-r--r--  runtime/oat_file.cc | 82
-rw-r--r--  runtime/oat_file.h | 9
-rw-r--r--  runtime/oat_file_assistant_test.cc | 23
-rw-r--r--  runtime/oat_file_manager.cc | 394
-rw-r--r--  runtime/oat_file_manager.h | 125
-rw-r--r--  runtime/parsed_options.cc | 21
-rw-r--r--  runtime/quick/inline_method_analyser.h | 1
-rw-r--r--  runtime/quick_exception_handler.cc | 10
-rw-r--r--  runtime/runtime.cc | 19
-rw-r--r--  runtime/runtime.h | 16
-rw-r--r--  runtime/runtime_linux.cc | 2
-rw-r--r--  runtime/runtime_options.def | 2
-rw-r--r--  runtime/stack.cc | 188
-rw-r--r--  runtime/stack.h | 80
-rw-r--r--  runtime/thread.cc | 30
-rw-r--r--  runtime/utils.cc | 9
-rw-r--r--  runtime/utils.h | 11
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 15
-rw-r--r--  runtime/utils/dex_cache_arrays_layout.h | 5
-rw-r--r--  runtime/verifier/method_verifier.cc | 35
-rw-r--r--  runtime/verifier/register_line.cc | 101
-rw-r--r--  test/004-ReferenceMap/stack_walk_refmap_jni.cc | 10
-rw-r--r--  test/088-monitor-verification/src/Main.java | 43
-rw-r--r--  test/088-monitor-verification/src/TwoPath.java | 53
-rw-r--r--  test/131-structural-change/expected.txt | 1
-rw-r--r--  test/131-structural-change/src/Main.java | 6
-rw-r--r--  test/141-class-unload/expected.txt | 1
-rw-r--r--  test/141-class-unload/src/Main.java | 24
-rw-r--r--  test/454-get-vreg/get_vreg_jni.cc | 17
-rw-r--r--  test/457-regs/regs_jni.cc | 5
-rw-r--r--  test/466-get-live-vreg/get_live_vreg_jni.cc | 3
-rw-r--r--  test/537-checker-arraycopy/expected.txt | 0
-rw-r--r--  test/537-checker-arraycopy/info.txt | 1
-rw-r--r--  test/537-checker-arraycopy/src/Main.java | 71
-rw-r--r--  test/538-checker-embed-constants/expected.txt | 0
-rw-r--r--  test/538-checker-embed-constants/info.txt | 1
-rw-r--r--  test/538-checker-embed-constants/src/Main.java | 290
-rwxr-xr-x  test/955-lambda-smali/run | 2
-rwxr-xr-x  test/960-default-smali/build | 33
-rw-r--r--  test/960-default-smali/expected.txt | 84
-rw-r--r--  test/960-default-smali/info.txt | 19
-rwxr-xr-x  test/960-default-smali/run | 21
-rw-r--r--  test/960-default-smali/smali/A.smali | 38
-rw-r--r--  test/960-default-smali/smali/Attendant.smali | 53
-rw-r--r--  test/960-default-smali/smali/B.smali | 38
-rw-r--r--  test/960-default-smali/smali/C.smali | 37
-rw-r--r--  test/960-default-smali/smali/D.smali | 38
-rw-r--r--  test/960-default-smali/smali/E.smali | 38
-rw-r--r--  test/960-default-smali/smali/Extension.smali | 30
-rw-r--r--  test/960-default-smali/smali/F.smali | 47
-rw-r--r--  test/960-default-smali/smali/G.smali | 37
-rw-r--r--  test/960-default-smali/smali/Greeter.smali | 40
-rw-r--r--  test/960-default-smali/smali/Greeter2.smali | 39
-rw-r--r--  test/960-default-smali/smali/Greeter3.smali | 40
-rw-r--r--  test/960-default-smali/smali/H.smali | 28
-rw-r--r--  test/960-default-smali/smali/I.smali | 28
-rw-r--r--  test/960-default-smali/smali/J.smali | 29
-rw-r--r--  test/960-default-smali/smali/classes.xml | 127
-rwxr-xr-x  test/960-default-smali/util-src/generate_smali.py | 376
-rwxr-xr-x  test/961-default-iface-resolution-generated/build | 47
-rw-r--r--  test/961-default-iface-resolution-generated/expected.txt | 1
-rw-r--r--  test/961-default-iface-resolution-generated/info.txt | 17
-rwxr-xr-x  test/961-default-iface-resolution-generated/run | 21
-rwxr-xr-x  test/961-default-iface-resolution-generated/util-src/generate_smali.py | 466
-rwxr-xr-x  test/962-iface-static/build | 30
-rw-r--r--  test/962-iface-static/expected.txt | 3
-rw-r--r--  test/962-iface-static/info.txt | 4
-rwxr-xr-x  test/962-iface-static/run | 21
-rw-r--r--  test/962-iface-static/smali/Displayer.smali | 45
-rw-r--r--  test/962-iface-static/smali/Main.smali | 40
-rw-r--r--  test/962-iface-static/smali/iface.smali | 43
-rwxr-xr-x  test/963-default-range-smali/build | 30
-rw-r--r--  test/963-default-range-smali/expected.txt | 2
-rw-r--r--  test/963-default-range-smali/info.txt | 4
-rwxr-xr-x  test/963-default-range-smali/run | 21
-rw-r--r--  test/963-default-range-smali/smali/A.smali | 29
-rw-r--r--  test/963-default-range-smali/smali/Main.smali | 77
-rw-r--r--  test/963-default-range-smali/smali/iface.smali | 40
-rwxr-xr-x  test/964-default-iface-init-generated/build | 45
-rw-r--r--  test/964-default-iface-init-generated/expected.txt | 1
-rw-r--r--  test/964-default-iface-init-generated/info.txt | 17
-rwxr-xr-x  test/964-default-iface-init-generated/run | 21
-rw-r--r--  test/964-default-iface-init-generated/smali/Displayer.smali | 45
-rwxr-xr-x  test/964-default-iface-init-generated/util-src/generate_smali.py | 531
-rw-r--r--  test/Android.run-test.mk | 28
-rwxr-xr-x  test/etc/default-build | 2
-rwxr-xr-x  test/run-all-tests | 5
-rwxr-xr-x  test/run-test | 14
-rw-r--r--  test/utils/python/testgen/mixins.py | 135
-rw-r--r--  test/utils/python/testgen/utils.py | 80
-rw-r--r--  tools/ahat/README.txt | 5
-rw-r--r--  tools/ahat/src/InstanceUtils.java | 61
-rwxr-xr-x  tools/extract-embedded-java | 35
222 files changed, 11868 insertions, 2851 deletions
diff --git a/.gitignore b/.gitignore
index c4cf98b37c..4e806c6514 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
JIT_ART
+**/__pycache__/**
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9775f6a5d7..1b54a510fd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -65,6 +65,7 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
+ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
@@ -348,6 +349,7 @@ COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
COMPILER_GTEST_HOST_SRC_FILES_mips := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
+ compiler/utils/mips/assembler_mips_test.cc \
COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 52df7deb25..f34b5edcc4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -21,6 +21,7 @@
#include "utils.h"
#include <numeric>
#include "gtest/gtest.h"
+#include "runtime/experimental_flags.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
reinterpret_cast<void*>(nullptr));
@@ -529,22 +530,32 @@ TEST_F(CmdlineParserTest, TestProfilerOptions) {
}
} // TEST_F
-/* -X[no]experimental-lambdas */
-TEST_F(CmdlineParserTest, TestExperimentalLambdas) {
+/* -Xexperimental:_ */
+TEST_F(CmdlineParserTest, TestExperimentalFlags) {
// Off by default
- EXPECT_SINGLE_PARSE_DEFAULT_VALUE(false,
+ EXPECT_SINGLE_PARSE_DEFAULT_VALUE(ExperimentalFlags::kNone,
"",
- M::ExperimentalLambdas);
+ M::Experimental);
// Disabled explicitly
- EXPECT_SINGLE_PARSE_VALUE(false,
- "-Xnoexperimental-lambdas",
- M::ExperimentalLambdas);
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone,
+ "-Xexperimental:none",
+ M::Experimental);
// Enabled explicitly
- EXPECT_SINGLE_PARSE_VALUE(true,
- "-Xexperimental-lambdas",
- M::ExperimentalLambdas);
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kLambdas,
+ "-Xexperimental:lambdas",
+ M::Experimental);
+ // Enabled explicitly
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kDefaultMethods,
+ "-Xexperimental:default-methods",
+ M::Experimental);
+
+ // Enabled both
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kDefaultMethods | ExperimentalFlags::kLambdas,
+ "-Xexperimental:default-methods "
+ "-Xexperimental:lambdas",
+ M::Experimental);
}
// -Xverify:_
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index a57b6196de..c594adbc94 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -28,6 +28,7 @@
#include "jdwp/jdwp.h"
#include "runtime/base/logging.h"
#include "runtime/base/time_utils.h"
+#include "runtime/experimental_flags.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
@@ -838,6 +839,23 @@ struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions>
static constexpr bool kCanParseBlankless = true;
};
+template<>
+struct CmdlineType<ExperimentalFlags> : CmdlineTypeParser<ExperimentalFlags> {
+ Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) {
+ if (option == "none") {
+ existing = existing | ExperimentalFlags::kNone;
+ } else if (option == "lambdas") {
+ existing = existing | ExperimentalFlags::kLambdas;
+ } else if (option == "default-methods") {
+ existing = existing | ExperimentalFlags::kDefaultMethods;
+ } else {
+ return Result::Failure(std::string("Unknown option '") + option + "'");
+ }
+ return Result::SuccessNoValue();
+ }
+
+ static const char* Name() { return "ExperimentalFlags"; }
+};
} // namespace art
#endif // ART_CMDLINE_CMDLINE_TYPES_H_
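The parser added above treats each -Xexperimental: occurrence as OR-ing one bit into the accumulated flags, so repeated options compose (and "none" is a no-op rather than a reset). A minimal standalone sketch of that accumulation semantics; the enum below is a simplified stand-in for art::ExperimentalFlags, not the real type:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Simplified stand-in for art::ExperimentalFlags (assumed bitmask semantics).
    enum : uint32_t { kNone = 0u, kLambdas = 1u, kDefaultMethods = 2u };

    uint32_t ParseAndAppend(uint32_t existing, const std::string& option) {
      if (option == "none") return existing | kNone;  // no-op, keeps earlier bits
      if (option == "lambdas") return existing | kLambdas;
      if (option == "default-methods") return existing | kDefaultMethods;
      return existing;  // the real parser returns Result::Failure here
    }

    int main() {
      // "-Xexperimental:default-methods -Xexperimental:lambdas" accumulates both.
      uint32_t flags = kNone;
      flags = ParseAndAppend(flags, "default-methods");
      flags = ParseAndAppend(flags, "lambdas");
      std::printf("0x%X\n", flags);  // prints 0x3
    }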
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 96e13ac9a3..20c80235ba 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -66,6 +66,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_utils.cc \
+ optimizing/constant_area_fixups_x86.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
@@ -210,7 +211,8 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
dex/quick/arm64/arm64_lir.h
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
- dex/quick/mips/mips_lir.h
+ dex/quick/mips/mips_lir.h \
+ utils/mips/assembler_mips.h
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips)
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index af93aabc91..e1a2838f3e 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -73,6 +73,7 @@ static constexpr bool kIntrinsicIsStatic[] = {
false, // kIntrinsicUnsafeGet
false, // kIntrinsicUnsafePut
true, // kIntrinsicSystemArrayCopyCharArray
+ true, // kIntrinsicSystemArrayCopy
};
static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
"arraysize of kIntrinsicIsStatic unexpected");
@@ -121,6 +122,8 @@ static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet_must_not_be_s
static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static");
static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
"SystemArrayCopyCharArray must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopy],
+ "SystemArrayCopy must be static");
MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
@@ -326,6 +329,9 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
// kProtoCacheCharArrayICharArrayII_V
{ kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt,
kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} },
+ // kProtoCacheObjectIObjectII_V
+ { kClassCacheVoid, 5, {kClassCacheJavaLangObject, kClassCacheInt,
+ kClassCacheJavaLangObject, kClassCacheInt, kClassCacheInt} },
// kProtoCacheIICharArrayI_V
{ kClassCacheVoid, 4, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray,
kClassCacheInt } },
@@ -481,6 +487,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
0),
+ INTRINSIC(JavaLangSystem, ArrayCopy, ObjectIObjectII_V , kIntrinsicSystemArrayCopy,
+ 0),
INTRINSIC(JavaLangInteger, RotateRight, II_I, kIntrinsicRotateRight, k32),
INTRINSIC(JavaLangLong, RotateRight, JI_J, kIntrinsicRotateRight, k64),
@@ -653,6 +661,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
case kIntrinsicNumberOfTrailingZeros:
case kIntrinsicRotateRight:
case kIntrinsicRotateLeft:
+ case kIntrinsicSystemArrayCopy:
return false; // not implemented in quick.
default:
LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 8458806e5e..5ce110c120 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -259,6 +259,7 @@ class DexFileMethodInliner {
kProtoCacheObjectJ_Object,
kProtoCacheObjectJObject_V,
kProtoCacheCharArrayICharArrayII_V,
+ kProtoCacheObjectIObjectII_V,
kProtoCacheIICharArrayI_V,
kProtoCacheByteArrayIII_String,
kProtoCacheIICharArray_String,
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 1cd742abac..c2fe5538b7 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -37,6 +37,7 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "elf_writer_quick.h"
+#include "experimental_flags.h"
#include "jni/quick/jni_compiler.h"
#include "mir_to_lir.h"
#include "mirror/object.h"
@@ -523,7 +524,8 @@ static bool SkipScanningUnsupportedOpcodes(InstructionSet instruction_set) {
// All opcodes are supported no matter what. Usually not the case
// since experimental opcodes are not implemented in the quick compiler.
return true;
- } else if (LIKELY(!Runtime::Current()->AreExperimentalLambdasEnabled())) {
+ } else if (LIKELY(!Runtime::Current()->
+ AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas))) {
// Experimental opcodes are disabled.
//
// If all unsupported opcodes are experimental we don't need to do scanning.
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index e535afd272..1a7dbe3a9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -370,7 +370,9 @@ inline int CompilerDriver::IsFastInvoke(
nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file)));
+ auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ *devirt_target->dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
called_method = class_linker->ResolveMethod(
*devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
class_loader, nullptr, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 74f19a1029..8324bf30d6 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -953,7 +953,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
StackHandleScope<2> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(
+ *dex_file,
+ Runtime::Current()->GetLinearAlloc())));
Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
@@ -2010,9 +2012,11 @@ class ResolveTypeVisitor : public CompilationVisitor {
ClassLinker* class_linker = manager_->GetClassLinker();
const DexFile& dex_file = *manager_->GetDexFile();
StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(dex_file)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == nullptr) {
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d9f8fcb43a..4310be6464 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -55,6 +55,7 @@
#include "mirror/string-inl.h"
#include "oat.h"
#include "oat_file.h"
+#include "oat_file_manager.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
@@ -126,8 +127,6 @@ bool ImageWriter::Write(const std::string& image_filename,
const std::string& oat_location) {
CHECK(!image_filename.empty());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
@@ -141,7 +140,8 @@ bool ImageWriter::Write(const std::string& image_filename,
oat_file->Erase();
return false;
}
- CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(
+ std::unique_ptr<const OatFile>(oat_file_));
interpreter_to_interpreter_bridge_offset_ =
oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 8b5fdc3fcb..5af2242963 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -325,84 +325,73 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000007f: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kMips[] = {
- 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB8, 0xAF,
- 0x34, 0x00, 0xAF, 0xAF, 0x30, 0x00, 0xAE, 0xAF, 0x2C, 0x00, 0xAD, 0xAF,
- 0x28, 0x00, 0xAC, 0xAF, 0x24, 0x00, 0xAB, 0xAF, 0x20, 0x00, 0xAA, 0xAF,
- 0x1C, 0x00, 0xA9, 0xAF, 0x18, 0x00, 0xA8, 0xAF, 0x00, 0x00, 0xA4, 0xAF,
- 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7, 0x4C, 0x00, 0xA6, 0xAF,
- 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27,
- 0x18, 0x00, 0xA8, 0x8F, 0x1C, 0x00, 0xA9, 0x8F, 0x20, 0x00, 0xAA, 0x8F,
- 0x24, 0x00, 0xAB, 0x8F, 0x28, 0x00, 0xAC, 0x8F, 0x2C, 0x00, 0xAD, 0x8F,
- 0x30, 0x00, 0xAE, 0x8F, 0x34, 0x00, 0xAF, 0x8F, 0x38, 0x00, 0xB8, 0x8F,
- 0x3C, 0x00, 0xBF, 0x8F, 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03,
- 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xBE, 0xAF,
+ 0x34, 0x00, 0xB7, 0xAF, 0x30, 0x00, 0xB6, 0xAF, 0x2C, 0x00, 0xB5, 0xAF,
+ 0x28, 0x00, 0xB4, 0xAF, 0x24, 0x00, 0xB3, 0xAF, 0x20, 0x00, 0xB2, 0xAF,
+ 0x00, 0x00, 0xA4, 0xAF, 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7,
+ 0x4C, 0x00, 0xA6, 0xAF, 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27,
+ 0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F,
+ 0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F,
+ 0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
+ 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
static constexpr uint8_t expected_cfi_kMips[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x98, 0x02, 0x44, 0x8F, 0x03,
- 0x44, 0x8E, 0x04, 0x44, 0x8D, 0x05, 0x44, 0x8C, 0x06, 0x44, 0x8B, 0x07,
- 0x44, 0x8A, 0x08, 0x44, 0x89, 0x09, 0x44, 0x88, 0x0A, 0x58, 0x0E, 0x60,
- 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xC8, 0x44, 0xC9, 0x44, 0xCA, 0x44, 0xCB,
- 0x44, 0xCC, 0x44, 0xCD, 0x44, 0xCE, 0x44, 0xCF, 0x44, 0xD8, 0x44, 0xDF,
- 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03,
+ 0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07,
+ 0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2,
+ 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE,
+ 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: sw r31, +60(r29)
// 0x00000008: .cfi_offset: r31 at cfa-4
-// 0x00000008: sw r24, +56(r29)
-// 0x0000000c: .cfi_offset: r24 at cfa-8
-// 0x0000000c: sw r15, +52(r29)
-// 0x00000010: .cfi_offset: r15 at cfa-12
-// 0x00000010: sw r14, +48(r29)
-// 0x00000014: .cfi_offset: r14 at cfa-16
-// 0x00000014: sw r13, +44(r29)
-// 0x00000018: .cfi_offset: r13 at cfa-20
-// 0x00000018: sw r12, +40(r29)
-// 0x0000001c: .cfi_offset: r12 at cfa-24
-// 0x0000001c: sw r11, +36(r29)
-// 0x00000020: .cfi_offset: r11 at cfa-28
-// 0x00000020: sw r10, +32(r29)
-// 0x00000024: .cfi_offset: r10 at cfa-32
-// 0x00000024: sw r9, +28(r29)
-// 0x00000028: .cfi_offset: r9 at cfa-36
-// 0x00000028: sw r8, +24(r29)
-// 0x0000002c: .cfi_offset: r8 at cfa-40
-// 0x0000002c: sw r4, +0(r29)
-// 0x00000030: sw r5, +68(r29)
-// 0x00000034: swc1 f12, +72(r29)
-// 0x00000038: sw r6, +76(r29)
-// 0x0000003c: sw r7, +80(r29)
-// 0x00000040: addiu r29, r29, -32
-// 0x00000044: .cfi_def_cfa_offset: 96
-// 0x00000044: addiu r29, r29, 32
-// 0x00000048: .cfi_def_cfa_offset: 64
-// 0x00000048: .cfi_remember_state
-// 0x00000048: lw r8, +24(r29)
-// 0x0000004c: .cfi_restore: r8
-// 0x0000004c: lw r9, +28(r29)
-// 0x00000050: .cfi_restore: r9
-// 0x00000050: lw r10, +32(r29)
-// 0x00000054: .cfi_restore: r10
-// 0x00000054: lw r11, +36(r29)
-// 0x00000058: .cfi_restore: r11
-// 0x00000058: lw r12, +40(r29)
-// 0x0000005c: .cfi_restore: r12
-// 0x0000005c: lw r13, +44(r29)
-// 0x00000060: .cfi_restore: r13
-// 0x00000060: lw r14, +48(r29)
-// 0x00000064: .cfi_restore: r14
-// 0x00000064: lw r15, +52(r29)
-// 0x00000068: .cfi_restore: r15
-// 0x00000068: lw r24, +56(r29)
-// 0x0000006c: .cfi_restore: r24
-// 0x0000006c: lw r31, +60(r29)
-// 0x00000070: .cfi_restore: r31
-// 0x00000070: addiu r29, r29, 64
-// 0x00000074: .cfi_def_cfa_offset: 0
-// 0x00000074: jr r31
-// 0x00000078: nop
-// 0x0000007c: .cfi_restore_state
-// 0x0000007c: .cfi_def_cfa_offset: 64
+// 0x00000008: sw r30, +56(r29)
+// 0x0000000c: .cfi_offset: r30 at cfa-8
+// 0x0000000c: sw r23, +52(r29)
+// 0x00000010: .cfi_offset: r23 at cfa-12
+// 0x00000010: sw r22, +48(r29)
+// 0x00000014: .cfi_offset: r22 at cfa-16
+// 0x00000014: sw r21, +44(r29)
+// 0x00000018: .cfi_offset: r21 at cfa-20
+// 0x00000018: sw r20, +40(r29)
+// 0x0000001c: .cfi_offset: r20 at cfa-24
+// 0x0000001c: sw r19, +36(r29)
+// 0x00000020: .cfi_offset: r19 at cfa-28
+// 0x00000020: sw r18, +32(r29)
+// 0x00000024: .cfi_offset: r18 at cfa-32
+// 0x00000024: sw r4, +0(r29)
+// 0x00000028: sw r5, +68(r29)
+// 0x0000002c: swc1 f12, +72(r29)
+// 0x00000030: sw r6, +76(r29)
+// 0x00000034: sw r7, +80(r29)
+// 0x00000038: addiu r29, r29, -32
+// 0x0000003c: .cfi_def_cfa_offset: 96
+// 0x0000003c: addiu r29, r29, 32
+// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000040: .cfi_remember_state
+// 0x00000040: lw r18, +32(r29)
+// 0x00000044: .cfi_restore: r18
+// 0x00000044: lw r19, +36(r29)
+// 0x00000048: .cfi_restore: r19
+// 0x00000048: lw r20, +40(r29)
+// 0x0000004c: .cfi_restore: r20
+// 0x0000004c: lw r21, +44(r29)
+// 0x00000050: .cfi_restore: r21
+// 0x00000050: lw r22, +48(r29)
+// 0x00000054: .cfi_restore: r22
+// 0x00000054: lw r23, +52(r29)
+// 0x00000058: .cfi_restore: r23
+// 0x00000058: lw r30, +56(r29)
+// 0x0000005c: .cfi_restore: r30
+// 0x0000005c: lw r31, +60(r29)
+// 0x00000060: .cfi_restore: r31
+// 0x00000060: addiu r29, r29, 64
+// 0x00000064: .cfi_def_cfa_offset: 0
+// 0x00000064: jr r31
+// 0x00000068: nop
+// 0x0000006c: .cfi_restore_state
+// 0x0000006c: .cfi_def_cfa_offset: 64
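As a reading aid for the expected_cfi_kMips bytes above: assuming standard DWARF call-frame opcodes with code alignment factor 1 and data alignment factor -4 (which is what the .cfi_ annotations imply), the sequence decodes as:

    0x44                 DW_CFA_advance_loc +4 (past the addiu)
    0x0E 0x40            DW_CFA_def_cfa_offset 64
    0x44 0x9F 0x01       advance +4; DW_CFA_offset r31 (0x80 | 31) at cfa-(1 * 4)
    0x44 0x9E 0x02       advance +4; DW_CFA_offset r30 at cfa-8
                         (r23 down to r18 continue the same pattern)
    0x58 0x0E 0x60       advance +24; def_cfa_offset 96
    0x44 0x0E 0x40       advance +4; def_cfa_offset 64
    0x0A                 DW_CFA_remember_state
    0x44 0xD2            advance +4; DW_CFA_restore r18 (0xC0 | 18)
                         (r19..r23, then r30 and r31, are restored the same way)
    0x44 0x0E 0x00       advance +4; def_cfa_offset 0
    0x48 0x0B 0x0E 0x40  advance +8 (jr + nop); DW_CFA_restore_state; def_cfa_offset 64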
static constexpr uint8_t expected_asm_kMips64[] = {
0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 953dfcb2c3..34f0802444 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -67,6 +67,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
InstructionSet instruction_set = driver->GetInstructionSet();
+ const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
// Calling conventions used to iterate over parameters to method
std::unique_ptr<JniCallingConvention> main_jni_conv(
@@ -93,7 +94,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
// Assembler that holds generated instructions
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set, instruction_set_features));
jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetGenerateDebugInfo());
// Offsets into data structures
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index be2397f518..ecf143d8f5 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -162,22 +162,19 @@ MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synch
}
padding_ = padding;
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T0));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T1));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T2));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T3));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T4));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T5));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T6));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T7));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T8));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S2));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S3));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S4));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S5));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S6));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S7));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(FP));
}
uint32_t MipsJniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
- result = 1 << T0 | 1 << T1 | 1 << T2 | 1 << T3 | 1 << T4 | 1 << T5 | 1 << T6 |
- 1 << T7 | 1 << T8 | 1 << RA;
+ result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << FP | 1 << RA;
return result;
}
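Since the callee-save set above is fixed, the spill mask is a compile-time constant; with the MIPS o32 register numbering (S2..S7 = r18..r23, FP = r30, RA = r31, assumed here) it works out to 0xC0FC0000, matching the CFI expectations earlier in this change. A standalone check:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // MIPS o32 register numbers (an assumption of this sketch, not ART code).
      enum { S2 = 18, S3 = 19, S4 = 20, S5 = 21, S6 = 22, S7 = 23, FP = 30, RA = 31 };
      uint32_t mask = 1u << S2 | 1u << S3 | 1u << S4 | 1u << S5 |
                      1u << S6 | 1u << S7 | 1u << FP | 1u << RA;
      std::printf("0x%08X\n", mask);  // prints 0xC0FC0000
    }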
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7b410bfe37..a78a5b3644 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -31,6 +31,7 @@
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "image_writer.h"
#include "linker/relative_patcher.h"
@@ -43,6 +44,7 @@
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -143,6 +145,18 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
}
size_ = offset;
+ if (!HasImage()) {
+ // Allocate space for app dex cache arrays in the .bss section.
+ size_t bss_start = RoundUp(size_, kPageSize);
+ size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ bss_size_ = 0u;
+ for (const DexFile* dex_file : dex_files) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
+ }
+
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
CHECK_EQ(compiler->IsImage(), image_writer_ != nullptr);
CHECK_EQ(compiler->IsImage(),
@@ -655,10 +669,10 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
no_thread_suspension_(soa_.Self(), "OatWriter patching"),
class_linker_(Runtime::Current()->GetClassLinker()),
dex_cache_(nullptr) {
- if (writer_->image_writer_ != nullptr) {
+ patched_code_.reserve(16 * KB);
+ if (writer_->HasImage()) {
// If we're creating the image, the address space must be ready so that we can apply patches.
CHECK(writer_->image_writer_->IsImageAddressSpaceReady());
- patched_code_.reserve(16 * KB);
}
}
@@ -841,24 +855,28 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
const uint8_t* oat_data = writer_->image_writer_->GetOatFileBegin() + file_offset_;
return element - oat_data;
} else {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
+ size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
+ return start + patch.TargetDexCacheElementOffset();
}
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
object = writer_->image_writer_->GetImageAddress(object);
+ } else {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the object is in the image space.
+ DCHECK(Runtime::Current()->GetHeap()->FindSpaceFromObject(object, false)->IsImageSpace());
}
+ // Note: We only patch Objects in the image, which lives in the low 4gb.
uint32_t address = PointerToLowMemUInt32(object);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -870,12 +888,17 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
method = writer_->image_writer_->GetImageMethodAddress(method);
+ } else if (kIsDebugBuild) {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the method is an image method.
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
}
- // Note: We only patch ArtMethods to low 4gb since thats where the image is.
+ // Note: We only patch ArtMethods in the image, which lives in the low 4gb.
uint32_t address = PointerToLowMemUInt32(method);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -887,9 +910,11 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
- PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
- writer_->oat_data_offset_ + target_offset);
+ uint32_t address = target_offset;
+ if (writer_->HasImage()) {
+ address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
+ writer_->oat_data_offset_ + target_offset);
+ }
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
data[0] = address & 0xffu;
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 48fbc0b2ac..a82d09eedd 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -90,6 +90,13 @@ class OatWriter {
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store);
+ // Returns whether the oat file has an associated image.
+ bool HasImage() const {
+ // Since the image is being created at the same time as the oat file,
+ // check if there's an image writer.
+ return image_writer_ != nullptr;
+ }
+
const OatHeader& GetOatHeader() const {
return *oat_header_;
}
@@ -272,6 +279,10 @@ class OatWriter {
// The size of the required .bss section holding the DexCache data.
size_t bss_size_;
+ // Offsets of the dex cache arrays for each app dex file. For the
+ // boot image, this information is provided by the ImageWriter.
+ SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_;
+
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 503d08f6f5..21540e8ed7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1175,10 +1175,9 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
verified_method->GetStringInitPcRegMap();
auto map_it = string_init_map.find(dex_pc);
if (map_it != string_init_map.end()) {
- std::set<uint32_t> reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ for (uint32_t reg : map_it->second) {
HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot, dex_pc);
- UpdateLocal(*set_it, load_local, dex_pc);
+ UpdateLocal(reg, load_local, dex_pc);
}
}
} else {
@@ -1302,7 +1301,13 @@ bool HGraphBuilder::IsOutermostCompilingClass(uint16_t type_index) const {
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- return outer_class.Get() == cls.Get();
+ // GetOutermostCompilingClass returns null when the class is unresolved
+ // (e.g. if it derives from an unresolved class). This is inconsistent, since
+ // we are compiling that very class.
+ // When this happens we cannot establish a direct relation between the current
+ // class and the outer class, so we return false.
+ // (Note that this is only used for optimizing invokes and field accesses)
+ return (cls.Get() != nullptr) && (outer_class.Get() == cls.Get());
}
void HGraphBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 28970062cc..6a743ebbc9 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -41,6 +41,7 @@
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "graph_visualizer.h"
+#include "intrinsics.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
@@ -1381,4 +1382,57 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
}
+void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
+ // Check to see if we have known failures that will cause us to have to bail out
+ // to the runtime, and just generate the runtime call directly.
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+
+ // The positions must be non-negative.
+ if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+ (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+ // We will have to fail anyway.
+ return;
+ }
+
+ // The length must be >= 0.
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+ if (length != nullptr) {
+ int32_t len = length->GetValue();
+ if (len < 0) {
+ // Just call as normal.
+ return;
+ }
+ }
+
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (optimizations.GetDestinationIsSource()) {
+ if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
+ // We only support backward copying if source and destination are the same.
+ return;
+ }
+ }
+
+ if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
+ // We currently don't intrinsify primitive copying.
+ return;
+ }
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+ locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
} // namespace art
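The helper above only creates intrinsic locations when no statically known failure forces a runtime call. A condensed sketch of that decision logic, in plain C++ with std::optional modeling "constant known at compile time"; this is an illustration, not the ART API:

    #include <cstdio>
    #include <optional>

    // Returns whether the SystemArrayCopy intrinsic locations should be created.
    bool ShouldCreateIntrinsicLocations(std::optional<int> src_pos,
                                        std::optional<int> dest_pos,
                                        std::optional<int> length,
                                        bool dest_is_source,
                                        bool primitive_arrays) {
      // Known-negative positions always throw: just call the runtime.
      if ((src_pos && *src_pos < 0) || (dest_pos && *dest_pos < 0)) return false;
      // Known-negative length also throws.
      if (length && *length < 0) return false;
      // Copying within one array with src_pos < dest_pos would need a backward
      // copy, which this intrinsic does not support.
      if (dest_is_source && src_pos && dest_pos && *src_pos < *dest_pos) return false;
      // Primitive-array copies are not intrinsified by this path.
      if (primitive_arrays) return false;
      return true;  // emit the kCallOnSlowPath LocationSummary
    }

    int main() {
      // Same array, constant src_pos < dest_pos: bail to the runtime call.
      std::printf("%d\n", ShouldCreateIntrinsicLocations(0, 1, 5, /*dest_is_source=*/true,
                                                         /*primitive_arrays=*/false));  // 0
    }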
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index acce5b3359..b04dfc00b2 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -421,6 +421,8 @@ class CodeGenerator {
Location runtime_type_index_location,
Location runtime_return_location);
+ static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
+
void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cac74c8da6..1df6818549 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1703,6 +1703,7 @@ void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+ codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -1742,6 +1743,7 @@ void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+ codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -3570,6 +3572,47 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI
}
}
+Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
+ Opcode opcode) {
+ DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
+ if (constant->IsConstant() &&
+ CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
+ return Location::ConstantLocation(constant->AsConstant());
+ }
+ return Location::RequiresRegister();
+}
+
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(HConstant* input_cst,
+ Opcode opcode) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
+ if (Primitive::Is64BitType(input_cst->GetType())) {
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
+ CanEncodeConstantAsImmediate(High32Bits(value), opcode);
+ } else {
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
+ }
+}
+
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
+ ShifterOperand so;
+ ArmAssembler* assembler = codegen_->GetAssembler();
+ if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
+ return true;
+ }
+ Opcode neg_opcode = kNoOperand;
+ switch (opcode) {
+ case AND:
+ neg_opcode = BIC;
+ break;
+ case ORR:
+ neg_opcode = ORN;
+ break;
+ default:
+ return false;
+ }
+ return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
+}
+
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
@@ -4970,17 +5013,18 @@ void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instr
nullptr);
}
-void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
-void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
-void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction, AND); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction, ORR); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction, EOR); }
-void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == Primitive::kPrimInt
|| instruction->GetResultType() == Primitive::kPrimLong);
+ // Note: GVN reorders commutative operations to have the constant on the right hand side.
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -4996,48 +5040,131 @@ void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
HandleBitwiseOperation(instruction);
}
+void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first, uint32_t value) {
+ // Optimize special cases for individual halves of `and-long` (`and` is simplified earlier).
+ if (value == 0xffffffffu) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ if (value == 0u) {
+ __ mov(out, ShifterOperand(0));
+ return;
+ }
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, AND, value, &so)) {
+ __ and_(out, first, so);
+ } else {
+ DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so));
+ __ bic(out, first, ShifterOperand(~value));
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateOrrConst(Register out, Register first, uint32_t value) {
+ // Optimize special cases for individual halves of `or-long` (`or` is simplified earlier).
+ if (value == 0u) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ if (value == 0xffffffffu) {
+ __ mvn(out, ShifterOperand(0));
+ return;
+ }
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORR, value, &so)) {
+ __ orr(out, first, so);
+ } else {
+ DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORN, ~value, &so));
+ __ orn(out, first, ShifterOperand(~value));
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateEorConst(Register out, Register first, uint32_t value) {
+ // Optimize special cases for individual halves of `xor-long` (`xor` is simplified earlier).
+ if (value == 0u) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ __ eor(out, first, ShifterOperand(value));
+}
+
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ uint32_t value_low = Low32Bits(value);
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first_reg = first.AsRegister<Register>();
+ Register out_reg = out.AsRegister<Register>();
+ if (instruction->IsAnd()) {
+ GenerateAndConst(out_reg, first_reg, value_low);
+ } else if (instruction->IsOr()) {
+ GenerateOrrConst(out_reg, first_reg, value_low);
+ } else {
+ DCHECK(instruction->IsXor());
+ GenerateEorConst(out_reg, first_reg, value_low);
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ uint32_t value_high = High32Bits(value);
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
+ if (instruction->IsAnd()) {
+ GenerateAndConst(out_low, first_low, value_low);
+ GenerateAndConst(out_high, first_high, value_high);
+ } else if (instruction->IsOr()) {
+ GenerateOrrConst(out_low, first_low, value_low);
+ GenerateOrrConst(out_high, first_high, value_high);
+ } else {
+ DCHECK(instruction->IsXor());
+ GenerateEorConst(out_low, first_low, value_low);
+ GenerateEorConst(out_high, first_high, value_high);
+ }
+ }
+ return;
+ }
if (instruction->GetResultType() == Primitive::kPrimInt) {
- Register first = locations->InAt(0).AsRegister<Register>();
- Register second = locations->InAt(1).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
+ Register first_reg = first.AsRegister<Register>();
+ ShifterOperand second_reg(second.AsRegister<Register>());
+ Register out_reg = out.AsRegister<Register>();
if (instruction->IsAnd()) {
- __ and_(out, first, ShifterOperand(second));
+ __ and_(out_reg, first_reg, second_reg);
} else if (instruction->IsOr()) {
- __ orr(out, first, ShifterOperand(second));
+ __ orr(out_reg, first_reg, second_reg);
} else {
DCHECK(instruction->IsXor());
- __ eor(out, first, ShifterOperand(second));
+ __ eor(out_reg, first_reg, second_reg);
}
} else {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
- Location first = locations->InAt(0);
- Location second = locations->InAt(1);
- Location out = locations->Out();
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ ShifterOperand second_low(second.AsRegisterPairLow<Register>());
+ ShifterOperand second_high(second.AsRegisterPairHigh<Register>());
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
if (instruction->IsAnd()) {
- __ and_(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ and_(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ and_(out_low, first_low, second_low);
+ __ and_(out_high, first_high, second_high);
} else if (instruction->IsOr()) {
- __ orr(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ orr(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ orr(out_low, first_low, second_low);
+ __ orr(out_high, first_high, second_high);
} else {
DCHECK(instruction->IsXor());
- __ eor(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ eor(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ eor(out_low, first_low, second_low);
+ __ eor(out_high, first_high, second_high);
}
}
}
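The AND/ORR constant paths added above lean on a complement trick: when a value is not encodable as an ARM modified immediate, its bitwise complement often is, and `and rd, rn, value` can be emitted as `bic rd, rn, ~value` (likewise `orr` as `orn`) since rn & value == rn & ~(~value). A rough standalone illustration; the encodability test below is a simplification of the real ShifterOperandCanHold rules:

    #include <cstdint>
    #include <cstdio>

    // Simplified ARM "modified immediate" test: an 8-bit value rotated right by
    // an even amount (the real Thumb-2 encoder accepts more patterns than this).
    bool IsArmImmediate(uint32_t v) {
      for (int rot = 0; rot < 32; rot += 2) {
        uint32_t undone = rot == 0 ? v : (v << rot) | (v >> (32 - rot));
        if (undone <= 0xFFu) return true;
      }
      return false;
    }

    int main() {
      uint32_t value = 0xFFFFFF0Fu;
      // AND with `value` is not encodable, but BIC with ~value (0x000000F0) is,
      // and it computes the same result.
      std::printf("AND ok: %d, BIC ok: %d\n",
                  IsArmImmediate(value), IsArmImmediate(~value));  // 0, 1
    }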
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 16d1d383b4..6900933e87 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -169,11 +169,15 @@ class LocationsBuilderARM : public HGraphVisitor {
private:
void HandleInvoke(HInvoke* invoke);
- void HandleBitwiseOperation(HBinaryOperation* operation);
+ void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
+
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitorARM parameter_visitor_;
@@ -205,6 +209,9 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
+ void GenerateAndConst(Register out, Register first, uint32_t value);
+ void GenerateOrrConst(Register out, Register first, uint32_t value);
+ void GenerateEorConst(Register out, Register first, uint32_t value);
void HandleBitwiseOperation(HBinaryOperation* operation);
void HandleShift(HBinaryOperation* operation);
void GenerateMemoryBarrier(MemBarrierKind kind);
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 5e8f9e7f30..7799437235 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -117,7 +117,7 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
return Location::RegisterLocation(A0);
}
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
- return Location::RegisterLocation(A0);
+ return Location::RegisterLocation(V0);
}
Location GetSetValueLocation(
Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f385afaa7f..9de159251f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -532,7 +532,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5724,6 +5725,51 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
+void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Constant area pointer.
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // And the temporary we need.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Optimizing has a jump table.
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+
+ // Remove the bias, if needed.
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg, -lower_bound));
+ value_reg = temp_reg;
+ }
+
+ // Is the value in range?
+ DCHECK_GE(num_entries, 1);
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load (target-constant_area) from the jump table, indexing by the value.
+ __ movl(temp_reg, codegen_->LiteralCaseTable(switch_instr, constant_area, value_reg));
+
+ // Compute the actual target address by adding in constant_area.
+ __ addl(temp_reg, constant_area);
+
+ // And jump.
+ __ jmp(temp_reg);
+}
+
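Note: taken together, the sequence above is a bounds-checked table dispatch; the single unsigned comparison (cmpl followed by j(kAbove)) rejects biased values that are negative as well as too large. A minimal C++ model of the computation, all names hypothetical:

  #include <cstdint>

  // `table` holds (target - anchor) entries, where `anchor` is the run-time
  // value produced by HX86ComputeBaseMethodAddress.
  intptr_t SwitchTarget(int32_t value, int32_t lower_bound, int32_t num_entries,
                        const int32_t* table, intptr_t anchor,
                        intptr_t default_target) {
    uint32_t biased = static_cast<uint32_t>(value - lower_bound);  // leal
    if (biased > static_cast<uint32_t>(num_entries - 1)) {         // cmpl + j(kAbove)
      return default_target;
    }
    return anchor + table[biased];                                 // movl + addl; then jmp
  }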
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
@@ -5807,28 +5853,18 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
}
}
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
- // Generate the constant area if needed.
- X86Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values.
- assembler->Align(4, 0);
- constant_area_start_ = assembler->CodeSize();
- assembler->AddConstantArea();
- }
-
- // And finish up.
- CodeGenerator::Finalize(allocator);
-}
-
/**
* Class to handle late fixup of offsets into constant area.
*/
class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
- RIPFixup(const CodeGeneratorX86& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
+ RIPFixup(CodeGeneratorX86& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86* codegen_;
private:
void Process(const MemoryRegion& region, int pos) OVERRIDE {
@@ -5836,19 +5872,77 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
- int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();;
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
}
- const CodeGeneratorX86& codegen_;
-
// Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
+ int32_t offset_into_constant_area_;
+};
+
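Note, for illustration (all numbers hypothetical): if the constant area starts at code offset 0x120, the fixup refers to offset 8 within it, and the HX86ComputeBaseMethodAddress anchor sits at offset 0x10, the patched displacement is 0x120 + 8 - 0x10 = 0x118. At run time the base register holds the anchor's address, so base + 0x118 lands exactly on the constant.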
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86& codegen, HX86PackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // The label values in the jump table are computed relative to the
+ // instruction addressing the constant area.
+ const int32_t relative_offset = codegen_->GetMethodAddressOffset();
+
+ // Populate the jump table with the target offsets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // Each entry is the target's position minus the method-address anchor offset.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - relative_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HX86PackedSwitch* switch_instr_;
};
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
@@ -5869,98 +5963,18 @@ Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
return Address(reg, kDummy32BitOffset, fixup);
}
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class ConstantHandlerVisitor : public HGraphVisitor {
- public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
-
- private:
- void VisitAdd(HAdd* add) OVERRIDE {
- BinaryFP(add);
- }
-
- void VisitSub(HSub* sub) OVERRIDE {
- BinaryFP(sub);
- }
-
- void VisitMul(HMul* mul) OVERRIDE {
- BinaryFP(mul);
- }
-
- void VisitDiv(HDiv* div) OVERRIDE {
- BinaryFP(div);
- }
-
- void VisitReturn(HReturn* ret) OVERRIDE {
- HConstant* value = ret->InputAt(0)->AsConstant();
- if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
- ReplaceInput(ret, value, 0, true);
- }
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void BinaryFP(HBinaryOperation* bin) {
- HConstant* rhs = bin->InputAt(1)->AsConstant();
- if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
- ReplaceInput(bin, rhs, 1, false);
- }
- }
-
- void InitializeConstantAreaPointer(HInstruction* user) {
- // Ensure we only initialize the pointer once.
- if (base_ != nullptr) {
- return;
- }
+Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
+ Register reg,
+ Register value) {
+ // Create a fixup that is used both to build and to address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
- HGraph* graph = GetGraph();
- HBasicBlock* entry = graph->GetEntryBlock();
- base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
- HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
- entry->InsertInstructionBefore(base_, insert_pos);
- DCHECK(base_ != nullptr);
- }
-
- void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
- HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
- insn->ReplaceInput(load_constant, input_index);
- }
-
- void HandleInvoke(HInvoke* invoke) {
- // Ensure that we can load FP arguments from the constant area.
- for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
- HConstant* input = invoke->InputAt(i)->AsConstant();
- if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
- ReplaceInput(invoke, input, i, true);
- }
- }
- }
-
- // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
- // input to the HX86LoadFromConstantTable instructions.
- HX86ComputeBaseMethodAddress* base_;
-};
+ // Record the fixup so the table is populated when the constant area is emitted.
+ fixups_to_jump_tables_.push_back(table_fixup);
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
- visitor.VisitInsertionOrder();
+ // We want a scaled address, as we are extracting the correct offset from the table.
+ return Address(reg, value, TIMES_4, kDummy32BitOffset, table_fixup);
}
// TODO: target as memory.
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ae2d84f945..fdfc5ab69b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};
+class JumpTableRIPFixup;
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
@@ -385,6 +387,8 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralInt32Address(int32_t v, Register reg);
Address LiteralInt64Address(int64_t v, Register reg);
+ Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
private:
@@ -405,6 +409,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Used for fixups to the constant area.
int32_t constant_area_start_;
+ // Fixups for jump tables that need to be patched after the constant table is generated.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
// If there is a HX86ComputeBaseMethodAddress instruction in the graph
// (which shall be the sole instruction of this kind), subtracting this offset
// from the value contained in the out register of this HX86ComputeBaseMethodAddress
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5757787f98..bedaa5577b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -36,9 +36,6 @@ namespace art {
namespace x86_64 {
-// Some x86_64 instructions require a register to be available as temp.
-static constexpr Register TMP = R11;
-
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = RDI;
@@ -679,7 +676,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -5363,31 +5361,43 @@ void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
int32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
- CpuRegister value_reg = locations->InAt(0).AsRegister<CpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+ // Remove the bias, if needed.
+ Register value_reg_out = value_reg_in.AsRegister();
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg_in, -lower_bound));
+ value_reg_out = temp_reg.AsRegister();
}
+ CpuRegister value_reg(value_reg_out);
- // And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
- }
+ // Is the value in range?
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load the address of the jump table in the constant area.
+ __ leaq(base_reg, codegen_->LiteralCaseTable(switch_instr));
+
+ // Load the (signed) offset from the jump table.
+ __ movsxd(temp_reg, Address(base_reg, value_reg, TIMES_4, 0));
+
+ // Add the offset to the address of the table base.
+ __ addq(temp_reg, base_reg);
+
+ // And jump.
+ __ jmp(temp_reg);
}
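Note: the x86-64 lowering differs from the 32-bit one in two visible ways: the table's address is formed RIP-relatively with leaq, and each 32-bit entry is sign-extended (movsxd) before being added to the table base. A minimal model, names hypothetical:

  #include <cstdint>

  // Entries hold (target - table_base) as signed 32-bit words.
  uintptr_t SwitchTarget64(uintptr_t table_base, const int32_t* table,
                           uint32_t index) {
    int64_t offset = static_cast<int64_t>(table[index]);  // movsxd
    return table_base + static_cast<uintptr_t>(offset);   // addq; then jmp
  }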
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
@@ -5413,15 +5423,85 @@ void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
}
}
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ RIPFixup(CodeGeneratorX86_64& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86_64* codegen_;
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. We use the address of the
+ // 'next' instruction, which is 'pos' (patch the 4 bytes before).
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - pos;
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ // Location in constant area that the fixup refers to.
+ size_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86_64& codegen, HPackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // Compute the offset from the start of the function to this jump table.
+ const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;
+
+ // Populate the jump table with the target offsets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - current_table_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HPackedSwitch* switch_instr_;
+};
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values. If used for vectors at a later time, this will need to be
- // updated to 16 bytes with the appropriate offset.
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8 byte values.
assembler->Align(4, 0);
constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
assembler->AddConstantArea();
}
@@ -5429,31 +5509,6 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-/**
- * Class to handle late fixup of offsets into constant area.
- */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
- public:
- RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
-
- private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
- // Patch the correct offset for the instruction. We use the address of the
- // 'next' instruction, which is 'pos' (patch the 4 bytes before).
- int constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int relative_position = constant_offset - pos;
-
- // Patch in the right value.
- region.StoreUnaligned<int32_t>(pos - 4, relative_position);
- }
-
- const CodeGeneratorX86_64& codegen_;
-
- // Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
-};
-
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
@@ -5494,6 +5549,16 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type t
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
+ // Create a fixup that is used both to build and to address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+
+ // Record the fixup so the table is populated when the constant area is emitted.
+ fixups_to_jump_tables_.push_back(table_fixup);
+ return Address::RIP(table_fixup);
+}
+
#undef __
} // namespace x86_64
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index ecc8630e6b..dc86a48ce7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -30,6 +30,9 @@ namespace x86_64 {
// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
+// Some x86_64 instructions require a register to be available as temp.
+static constexpr Register TMP = R11;
+
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
{ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };
@@ -231,6 +234,9 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};
+// Class for fixups to jump tables.
+class JumpTableRIPFixup;
+
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
@@ -351,6 +357,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Load a 64 bit value into a register in the most efficient manner.
void Load64BitValue(CpuRegister dest, int64_t value);
+ Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
@@ -388,6 +395,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// We will fix this up in the linker later to have the right value.
static constexpr int32_t kDummy32BitOffset = 256;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/constant_area_fixups_x86.cc
new file mode 100644
index 0000000000..c3470002c5
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_area_fixups_x86.h"
+
+namespace art {
+namespace x86 {
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if (value != nullptr && Primitive::IsFloatingPointType(value->GetType())) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
+ // address the constant area.
+ InitializeConstantAreaPointer(switch_insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
+ // input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace x86
+} // namespace art
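Note: the net effect of this pass on a method returning a floating-point constant, sketched as before/after IR (instruction mnemonics as used above, register names illustrative):

  Before:
    r1 <- FloatConstant 1.5f
    Return r1

  After ConstantAreaFixups:
    base <- X86ComputeBaseMethodAddress            (inserted once, in the entry block)
    r2   <- X86LoadFromConstantTable base, 1.5f    (materialize = true for Return)
    Return r2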
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 694f7687ba..b2e222f1a9 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -51,7 +51,7 @@ static void TestCode(const uint16_t* data,
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HConstantFolding(graph).Run();
- SSAChecker ssa_checker_cf(&allocator, graph);
+ SSAChecker ssa_checker_cf(graph);
ssa_checker_cf.Run();
ASSERT_TRUE(ssa_checker_cf.IsValid());
@@ -63,7 +63,7 @@ static void TestCode(const uint16_t* data,
check_after_cf(graph);
HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker_dce(&allocator, graph);
+ SSAChecker ssa_checker_dce(graph);
ssa_checker_dce.Run();
ASSERT_TRUE(ssa_checker_dce.IsValid());
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index ee3a61aa0c..cf0a4acd4a 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -45,7 +45,7 @@ static void TestCode(const uint16_t* data,
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker(&allocator, graph);
+ SSAChecker ssa_checker(graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 89da1b1fbe..3de96b5d84 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -16,10 +16,12 @@
#include "graph_checker.h"
+#include <algorithm>
#include <map>
#include <string>
#include <sstream>
+#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/stringprintf.h"
@@ -29,19 +31,21 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_ = block;
// Check consistency with respect to predecessors of `block`.
- std::map<HBasicBlock*, size_t> predecessors_count;
+ ArenaSafeMap<HBasicBlock*, size_t> predecessors_count(
+ std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
for (HBasicBlock* p : block->GetPredecessors()) {
- ++predecessors_count[p];
+ auto it = predecessors_count.find(p);
+ if (it != predecessors_count.end()) {
+ ++it->second;
+ } else {
+ predecessors_count.Put(p, 1u);
+ }
}
for (auto& pc : predecessors_count) {
HBasicBlock* p = pc.first;
size_t p_count_in_block_predecessors = pc.second;
- size_t block_count_in_p_successors = 0;
- for (HBasicBlock* p_successor : p->GetSuccessors()) {
- if (p_successor == block) {
- ++block_count_in_p_successors;
- }
- }
+ size_t block_count_in_p_successors =
+ std::count(p->GetSuccessors().begin(), p->GetSuccessors().end(), block);
if (p_count_in_block_predecessors != block_count_in_p_successors) {
AddError(StringPrintf(
"Block %d lists %zu occurrences of block %d in its predecessors, whereas "
@@ -52,19 +56,21 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
}
// Check consistency with respect to successors of `block`.
- std::map<HBasicBlock*, size_t> successors_count;
+ ArenaSafeMap<HBasicBlock*, size_t> successors_count(
+ std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
for (HBasicBlock* s : block->GetSuccessors()) {
- ++successors_count[s];
+ auto it = successors_count.find(s);
+ if (it != successors_count.end()) {
+ ++it->second;
+ } else {
+ successors_count.Put(s, 1u);
+ }
}
for (auto& sc : successors_count) {
HBasicBlock* s = sc.first;
size_t s_count_in_block_successors = sc.second;
- size_t block_count_in_s_predecessors = 0;
- for (HBasicBlock* s_predecessor : s->GetPredecessors()) {
- if (s_predecessor == block) {
- ++block_count_in_s_predecessors;
- }
- }
+ size_t block_count_in_s_predecessors =
+ std::count(s->GetPredecessors().begin(), s->GetPredecessors().end(), block);
if (s_count_in_block_successors != block_count_in_s_predecessors) {
AddError(StringPrintf(
"Block %d lists %zu occurrences of block %d in its successors, whereas "
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 7ddffc136a..abf3659d91 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -26,12 +26,11 @@ namespace art {
// A control-flow graph visitor performing various checks.
class GraphChecker : public HGraphDelegateVisitor {
public:
- GraphChecker(ArenaAllocator* allocator, HGraph* graph,
- const char* dump_prefix = "art::GraphChecker: ")
+ explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ")
: HGraphDelegateVisitor(graph),
- allocator_(allocator),
+ errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(allocator, graph->GetCurrentInstructionId(), false) {}
+ seen_ids_(graph->GetArena(), graph->GetCurrentInstructionId(), false) {}
// Check the whole graph (in insertion order).
virtual void Run() { VisitInsertionOrder(); }
@@ -65,7 +64,7 @@ class GraphChecker : public HGraphDelegateVisitor {
}
// Get the list of detected errors.
- const std::vector<std::string>& GetErrors() const {
+ const ArenaVector<std::string>& GetErrors() const {
return errors_;
}
@@ -82,11 +81,10 @@ class GraphChecker : public HGraphDelegateVisitor {
errors_.push_back(error);
}
- ArenaAllocator* const allocator_;
// The block currently visited.
HBasicBlock* current_block_ = nullptr;
// Errors encountered while checking the graph.
- std::vector<std::string> errors_;
+ ArenaVector<std::string> errors_;
private:
// String displayed before dumped errors.
@@ -102,9 +100,8 @@ class SSAChecker : public GraphChecker {
public:
typedef GraphChecker super_type;
- // TODO: There's no need to pass a separate allocator as we could get it from the graph.
- SSAChecker(ArenaAllocator* allocator, HGraph* graph)
- : GraphChecker(allocator, graph, "art::SSAChecker: ") {}
+ explicit SSAChecker(HGraph* graph)
+ : GraphChecker(graph, "art::SSAChecker: ") {}
// Check the whole graph (in reverse post-order).
void Run() OVERRIDE {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 0f6677519e..fee56c7f9e 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -50,7 +50,7 @@ static void TestCode(const uint16_t* data) {
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
}
@@ -64,7 +64,7 @@ static void TestCodeSSA(const uint16_t* data) {
graph->BuildDominatorTree();
graph->TransformToSsa();
- SSAChecker ssa_checker(&allocator, graph);
+ SSAChecker ssa_checker(graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
}
@@ -112,7 +112,7 @@ TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
ArenaAllocator allocator(&pool);
HGraph* graph = CreateSimpleCFG(&allocator);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
@@ -130,7 +130,7 @@ TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
ArenaAllocator allocator(&pool);
HGraph* graph = CreateSimpleCFG(&allocator);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 17c851cac9..d468540091 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -76,6 +76,9 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
+ void SimplifySystemArrayCopy(HInvoke* invoke);
+ void SimplifyStringEquals(HInvoke* invoke);
+
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
@@ -1051,28 +1054,100 @@ void InstructionSimplifierVisitor::VisitFakeString(HFakeString* instruction) {
instruction->GetBlock()->RemoveInstruction(instruction);
}
-void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
- if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
- HInstruction* argument = instruction->InputAt(1);
- HInstruction* receiver = instruction->InputAt(0);
- if (receiver == argument) {
- // Because String.equals is an instance call, the receiver is
- // a null check if we don't know it's null. The argument however, will
- // be the actual object. So we cannot end up in a situation where both
- // are equal but could be null.
- DCHECK(CanEnsureNotNullAt(argument, instruction));
- instruction->ReplaceWith(GetGraph()->GetIntConstant(1));
- instruction->GetBlock()->RemoveInstruction(instruction);
- } else {
- StringEqualsOptimizations optimizations(instruction);
- if (CanEnsureNotNullAt(argument, instruction)) {
- optimizations.SetArgumentNotNull();
+void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
+ HInstruction* argument = instruction->InputAt(1);
+ HInstruction* receiver = instruction->InputAt(0);
+ if (receiver == argument) {
+ // Because String.equals is an instance call, the receiver is
+ // a null check if we don't know it's null. The argument, however, will
+ // be the actual object. So we cannot end up in a situation where both
+ // are equal but could be null.
+ DCHECK(CanEnsureNotNullAt(argument, instruction));
+ instruction->ReplaceWith(GetGraph()->GetIntConstant(1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else {
+ StringEqualsOptimizations optimizations(instruction);
+ if (CanEnsureNotNullAt(argument, instruction)) {
+ optimizations.SetArgumentNotNull();
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo argument_rti = argument->GetReferenceTypeInfo();
+ if (argument_rti.IsValid() && argument_rti.IsStringClass()) {
+ optimizations.SetArgumentIsString();
+ }
+ }
+}
+
+static bool IsArrayLengthOf(HInstruction* potential_length, HInstruction* potential_array) {
+ if (potential_length->IsArrayLength()) {
+ return potential_length->InputAt(0) == potential_array;
+ }
+
+ if (potential_array->IsNewArray()) {
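+ // An HNewArray takes its length as input 0, so a freshly allocated
+ // array whose length input is `potential_length` also matches.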
+ return potential_array->InputAt(0) == potential_length;
+ }
+
+ return false;
+}
+
+void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) {
+ HInstruction* source = instruction->InputAt(0);
+ HInstruction* destination = instruction->InputAt(2);
+ HInstruction* count = instruction->InputAt(4);
+ SystemArrayCopyOptimizations optimizations(instruction);
+ if (CanEnsureNotNullAt(source, instruction)) {
+ optimizations.SetSourceIsNotNull();
+ }
+ if (CanEnsureNotNullAt(destination, instruction)) {
+ optimizations.SetDestinationIsNotNull();
+ }
+ if (destination == source) {
+ optimizations.SetDestinationIsSource();
+ }
+
+ if (IsArrayLengthOf(count, source)) {
+ optimizations.SetCountIsSourceLength();
+ }
+
+ if (IsArrayLengthOf(count, destination)) {
+ optimizations.SetCountIsDestinationLength();
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo destination_rti = destination->GetReferenceTypeInfo();
+ if (destination_rti.IsValid()) {
+ if (destination_rti.IsObjectArray()) {
+ if (destination_rti.IsExact()) {
+ optimizations.SetDoesNotNeedTypeCheck();
+ }
+ optimizations.SetDestinationIsTypedObjectArray();
}
- ScopedObjectAccess soa(Thread::Current());
- if (argument->GetReferenceTypeInfo().IsStringClass()) {
- optimizations.SetArgumentIsString();
+ if (destination_rti.IsPrimitiveArrayClass()) {
+ optimizations.SetDestinationIsPrimitiveArray();
+ } else if (destination_rti.IsNonPrimitiveArrayClass()) {
+ optimizations.SetDestinationIsNonPrimitiveArray();
}
}
+ ReferenceTypeInfo source_rti = source->GetReferenceTypeInfo();
+ if (source_rti.IsValid()) {
+ if (destination_rti.IsValid() && destination_rti.CanArrayHoldValuesOf(source_rti)) {
+ optimizations.SetDoesNotNeedTypeCheck();
+ }
+ if (source_rti.IsPrimitiveArrayClass()) {
+ optimizations.SetSourceIsPrimitiveArray();
+ } else if (source_rti.IsNonPrimitiveArrayClass()) {
+ optimizations.SetSourceIsNonPrimitiveArray();
+ }
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
+ if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
+ SimplifyStringEquals(instruction);
+ } else if (instruction->GetIntrinsic() == Intrinsics::kSystemArrayCopy) {
+ SimplifySystemArrayCopy(instruction);
}
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 95646222ef..dbe75249be 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -210,6 +210,9 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
case kIntrinsicSystemArrayCopyCharArray:
return Intrinsics::kSystemArrayCopyChar;
+ case kIntrinsicSystemArrayCopy:
+ return Intrinsics::kSystemArrayCopy;
+
// Thread.currentThread.
case kIntrinsicCurrentThread:
return Intrinsics::kThreadCurrentThread;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index d50fe79f93..e459516e59 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -168,6 +168,26 @@ class StringEqualsOptimizations : public IntrinsicOptimizations {
DISALLOW_COPY_AND_ASSIGN(StringEqualsOptimizations);
};
+class SystemArrayCopyOptimizations : public IntrinsicOptimizations {
+ public:
+ explicit SystemArrayCopyOptimizations(HInvoke* invoke) : IntrinsicOptimizations(invoke) {}
+
+ INTRINSIC_OPTIMIZATION(SourceIsNotNull, 0);
+ INTRINSIC_OPTIMIZATION(DestinationIsNotNull, 1);
+ INTRINSIC_OPTIMIZATION(DestinationIsSource, 2);
+ INTRINSIC_OPTIMIZATION(CountIsSourceLength, 3);
+ INTRINSIC_OPTIMIZATION(CountIsDestinationLength, 4);
+ INTRINSIC_OPTIMIZATION(DoesNotNeedTypeCheck, 5);
+ INTRINSIC_OPTIMIZATION(DestinationIsTypedObjectArray, 6);
+ INTRINSIC_OPTIMIZATION(DestinationIsNonPrimitiveArray, 7);
+ INTRINSIC_OPTIMIZATION(DestinationIsPrimitiveArray, 8);
+ INTRINSIC_OPTIMIZATION(SourceIsNonPrimitiveArray, 9);
+ INTRINSIC_OPTIMIZATION(SourceIsPrimitiveArray, 10);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SystemArrayCopyOptimizations);
+};
+
#undef INTRINSIC_OPTIMIZATION
} // namespace art
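Note: INTRINSIC_OPTIMIZATION evidently pairs a SetX/GetX accessor with a single bit at the given index; a self-contained sketch of that scheme (the actual storage location inside the invoke node is assumed here):

  #include <cstdint>

  // Sketch: one bit per named optimization, indexed as in the class above.
  class SystemArrayCopyFlags {
   public:
    void SetSourceIsNotNull()           { bits_ |= 1u << 0; }
    bool GetSourceIsNotNull() const     { return (bits_ >> 0) & 1u; }
    void SetDestinationIsSource()       { bits_ |= 1u << 2; }
    bool GetDestinationIsSource() const { return (bits_ >> 2) & 1u; }
   private:
    uint32_t bits_ = 0;
  };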
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 938c78e9c1..58e479afc7 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1307,6 +1307,308 @@ void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke)
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitSystemArrayCopy(HInvoke* invoke) {
+ CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+ LocationSummary* locations = invoke->GetLocations();
+ if (locations == nullptr) {
+ return;
+ }
+
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+
+ if (src_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(src_pos->GetValue())) {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ if (dest_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(dest_pos->GetValue())) {
+ locations->SetInAt(3, Location::RequiresRegister());
+ }
+ if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
+ locations->SetInAt(4, Location::RequiresRegister());
+ }
+}
+
+static void CheckPosition(ArmAssembler* assembler,
+ Location pos,
+ Register input,
+ Location length,
+ SlowPathCode* slow_path,
+ Register input_len,
+ Register temp,
+ bool length_is_input_length = false) {
+ // Where is the length in the Array?
+ const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
+ if (pos_const == 0) {
+ if (!length_is_input_length) {
+ // Check that length(input) >= length.
+ __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+ } else {
+ // Check that length(input) >= pos.
+ __ LoadFromOffset(kLoadWord, input_len, input, length_offset);
+ __ subs(temp, input_len, ShifterOperand(pos_const));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that (length(input) - pos) >= length.
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+ } else if (length_is_input_length) {
+ // The only way the copy can succeed is if pos is zero.
+ Register pos_reg = pos.AsRegister<Register>();
+ __ CompareAndBranchIfNonZero(pos_reg, slow_path->GetEntryLabel());
+ } else {
+ // Check that pos >= 0.
+ Register pos_reg = pos.AsRegister<Register>();
+ __ cmp(pos_reg, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that pos <= length(input).
+ __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+ __ subs(temp, temp, ShifterOperand(pos_reg));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that (length(input) - pos) >= length.
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+}
+
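Note: all of CheckPosition's slow-path branches encode one invariant; in plain C++, the copy may proceed only if (sketch, signed 32-bit semantics as in the generated code):

  #include <cstdint>

  bool PositionIsValid(int32_t pos, int32_t length, int32_t input_len) {
    return pos >= 0 && input_len >= pos && input_len - pos >= length;
  }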
+void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+ Register src = locations->InAt(0).AsRegister<Register>();
+ Location src_pos = locations->InAt(1);
+ Register dest = locations->InAt(2).AsRegister<Register>();
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ Label ok;
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (!optimizations.GetDestinationIsSource()) {
+ if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+ __ cmp(src, ShifterOperand(dest));
+ }
+ }
+
+ // If source and destination are the same, we go to the slow path when
+ // dest_pos > src_pos, as the forward copy below would overwrite source
+ // elements before reading them.
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ if (dest_pos.IsConstant()) {
+ // Checked when building locations.
+ DCHECK(!optimizations.GetDestinationIsSource()
+ || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ b(&ok, NE);
+ }
+ __ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
+ __ b(slow_path->GetEntryLabel(), GT);
+ }
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ b(&ok, NE);
+ }
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos_constant));
+ } else {
+ __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+
+ __ Bind(&ok);
+
+ if (!optimizations.GetSourceIsNotNull()) {
+ // Bail out if the source is null.
+ __ CompareAndBranchIfZero(src, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+ // Bail out if the destination is null.
+ __ CompareAndBranchIfZero(dest, slow_path->GetEntryLabel());
+ }
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant() &&
+ !optimizations.GetCountIsSourceLength() &&
+ !optimizations.GetCountIsDestinationLength()) {
+ __ cmp(length.AsRegister<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler,
+ src_pos,
+ src,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsSourceLength());
+
+ // Validity checks: dest.
+ CheckPosition(assembler,
+ dest_pos,
+ dest,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsDestinationLength());
+
+ if (!optimizations.GetDoesNotNeedTypeCheck()) {
+ // Check whether all elements of the source array are assignable to the component
+ // type of the destination array. We do two checks: the classes are the same,
+ // or the destination is Object[]. If neither check succeeds, we go to the
+ // slow path.
+ __ LoadFromOffset(kLoadWord, temp1, dest, class_offset);
+ __ LoadFromOffset(kLoadWord, temp2, src, class_offset);
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non-primitive array.
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non-primitive array.
+ __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ Label do_copy;
+ __ b(&do_copy, EQ);
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // No need to unpoison the result, we're comparing against null.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
+ }
+ } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+ // Bail out if the source is not a non-primitive array.
+ __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ // Compute base source address, base destination address, and end source address.
+
+ uint32_t element_size = sizeof(int32_t);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp1, src, element_size * constant + offset);
+ } else {
+ __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, 2));
+ __ AddConstant(temp1, offset);
+ }
+
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp2, dest, element_size * constant + offset);
+ } else {
+ __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, 2));
+ __ AddConstant(temp2, offset);
+ }
+
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp3, temp1, element_size * constant);
+ } else {
+ __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, 2));
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need
+ // to poison/unpoison or emit read barriers, as the next uses of the
+ // destination array will do so.
+ Label loop, done;
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&done, EQ);
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
+ __ Bind(&done);
+
+ // We only need one card marking on the destination array.
+ codegen_->MarkGCCard(temp1,
+ temp2,
+ dest,
+ Register(kNoRegister),
+ false);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
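Note: the emitted loop is a straightforward forward word copy using post-indexed addressing; its C++ equivalent (sketch):

  #include <cstdint>

  // Copy 32-bit reference slots until `src` reaches `src_end`, mirroring
  // the ldr/str post-index pair above.
  void RawWordCopy(const uint32_t* src, uint32_t* dst, const uint32_t* src_end) {
    while (src != src_end) {
      *dst++ = *src++;
    }
  }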
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 2abb605e6e..127e9a4aa0 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -33,8 +33,10 @@ class CodeGeneratorARM;
class IntrinsicLocationsBuilderARM FINAL : public IntrinsicVisitor {
public:
- IntrinsicLocationsBuilderARM(ArenaAllocator* arena, const ArmInstructionSetFeatures& features)
- : arena_(arena), features_(features) {}
+ IntrinsicLocationsBuilderARM(ArenaAllocator* arena,
+ ArmAssembler* assembler,
+ const ArmInstructionSetFeatures& features)
+ : arena_(arena), assembler_(assembler), features_(features) {}
// Define visitor methods.
@@ -52,6 +54,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
private:
ArenaAllocator* arena_;
+ ArmAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b0cfd0d1bc..4da94ee9b3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1447,6 +1447,7 @@ void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED
}
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index bfe5e55c56..8f1d5e1c4d 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -58,6 +58,7 @@
V(MathRoundDouble, kStatic, kNeedsEnvironmentOrCache) \
V(MathRoundFloat, kStatic, kNeedsEnvironmentOrCache) \
V(SystemArrayCopyChar, kStatic, kNeedsEnvironmentOrCache) \
+ V(SystemArrayCopy, kStatic, kNeedsEnvironmentOrCache) \
V(ThreadCurrentThread, kStatic, kNeedsEnvironmentOrCache) \
V(MemoryPeekByte, kStatic, kNeedsEnvironmentOrCache) \
V(MemoryPeekIntNative, kStatic, kNeedsEnvironmentOrCache) \
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index b60905d682..764a11475f 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -812,6 +812,7 @@ UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 263c37596f..e83aebb5be 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2255,6 +2255,7 @@ UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(LongRotateRight)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
#undef UNIMPLEMENTED_INTRINSIC
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 098db4ca28..e0d88a91d3 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -752,7 +752,7 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke)
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);
- // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
@@ -768,19 +768,27 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke)
static void CheckPosition(X86_64Assembler* assembler,
Location pos,
CpuRegister input,
- CpuRegister length,
+ Location length,
SlowPathCode* slow_path,
CpuRegister input_len,
- CpuRegister temp) {
- // Where is the length in the String?
+ CpuRegister temp,
+ bool length_is_input_length = false) {
+ // Where is the length in the Array?
const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
if (pos.IsConstant()) {
int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
if (pos_const == 0) {
- // Check that length(input) >= length.
- __ cmpl(Address(input, length_offset), length);
- __ j(kLess, slow_path->GetEntryLabel());
+ if (!length_is_input_length) {
+ // Check that length(input) >= length.
+ if (length.IsConstant()) {
+ __ cmpl(Address(input, length_offset),
+ Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(Address(input, length_offset), length.AsRegister<CpuRegister>());
+ }
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
} else {
// Check that length(input) >= pos.
__ movl(input_len, Address(input, length_offset));
@@ -789,9 +797,18 @@ static void CheckPosition(X86_64Assembler* assembler,
// Check that (length(input) - pos) >= length.
__ leal(temp, Address(input_len, -pos_const));
- __ cmpl(temp, length);
+ if (length.IsConstant()) {
+ __ cmpl(temp, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(temp, length.AsRegister<CpuRegister>());
+ }
__ j(kLess, slow_path->GetEntryLabel());
}
+ } else if (length_is_input_length) {
+ // The only way the copy can succeed is if pos is zero.
+ CpuRegister pos_reg = pos.AsRegister<CpuRegister>();
+ __ testl(pos_reg, pos_reg);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
} else {
// Check that pos >= 0.
CpuRegister pos_reg = pos.AsRegister<CpuRegister>();
@@ -805,7 +822,11 @@ static void CheckPosition(X86_64Assembler* assembler,
// Check that (length(input) - pos) >= length.
__ movl(temp, Address(input, length_offset));
__ subl(temp, pos_reg);
- __ cmpl(temp, length);
+ if (length.IsConstant()) {
+ __ cmpl(temp, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(temp, length.AsRegister<CpuRegister>());
+ }
__ j(kLess, slow_path->GetEntryLabel());
}
}
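
The constant-vs-register dispatch repeated above could be read as the following hypothetical helper (illustrative only, not part of the patch):

    // Hypothetical helper: compare a register against a length Location that
    // may be a compile-time constant or a runtime register.
    static void CmpWithLength(X86_64Assembler* assembler,
                              CpuRegister lhs,
                              Location length) {
      if (length.IsConstant()) {
        assembler->cmpl(lhs, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        assembler->cmpl(lhs, length.AsRegister<CpuRegister>());
      }
    }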
@@ -815,9 +836,9 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
- Location srcPos = locations->InAt(1);
+ Location src_pos = locations->InAt(1);
CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
- Location destPos = locations->InAt(3);
+ Location dest_pos = locations->InAt(3);
Location length = locations->InAt(4);
// Temporaries that we need for MOVSW.
@@ -850,6 +871,12 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ j(kLess, slow_path->GetEntryLabel());
}
+ // Validity checks: source.
+ CheckPosition(assembler, src_pos, src, length, slow_path, src_base, dest_base);
+
+ // Validity checks: dest.
+ CheckPosition(assembler, dest_pos, dest, length, slow_path, src_base, dest_base);
+
// We need the count in RCX.
if (length.IsConstant()) {
__ movl(count, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
@@ -857,12 +884,6 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ movl(count, length.AsRegister<CpuRegister>());
}
- // Validity checks: source.
- CheckPosition(assembler, srcPos, src, count, slow_path, src_base, dest_base);
-
- // Validity checks: dest.
- CheckPosition(assembler, destPos, dest, count, slow_path, src_base, dest_base);
-
// Okay, everything checks out. Finally time to do the copy.
// Check assumption that sizeof(Char) is 2 (used in scaling below).
const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
@@ -870,18 +891,18 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
- if (srcPos.IsConstant()) {
- int32_t srcPos_const = srcPos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(src_base, Address(src, char_size * srcPos_const + data_offset));
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_base, Address(src, char_size * src_pos_const + data_offset));
} else {
- __ leal(src_base, Address(src, srcPos.AsRegister<CpuRegister>(),
+ __ leal(src_base, Address(src, src_pos.AsRegister<CpuRegister>(),
ScaleFactor::TIMES_2, data_offset));
}
- if (destPos.IsConstant()) {
- int32_t destPos_const = destPos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(dest_base, Address(dest, char_size * destPos_const + data_offset));
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(dest_base, Address(dest, char_size * dest_pos_const + data_offset));
} else {
- __ leal(dest_base, Address(dest, destPos.AsRegister<CpuRegister>(),
+ __ leal(dest_base, Address(dest, dest_pos.AsRegister<CpuRegister>(),
ScaleFactor::TIMES_2, data_offset));
}
@@ -891,6 +912,231 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+
+void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
+ CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+ CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
+ Location src_pos = locations->InAt(1);
+ CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ CpuRegister temp1 = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister temp2 = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister temp3 = locations->GetTemp(2).AsRegister<CpuRegister>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ NearLabel ok;
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (!optimizations.GetDestinationIsSource()) {
+ if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+ __ cmpl(src, dest);
+ }
+ }
+
+  // If source and destination are the same, we go to the slow path when
+  // dest_pos > src_pos, since the forward copy below would otherwise
+  // overwrite source elements that have not yet been copied.
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ if (dest_pos.IsConstant()) {
+ // Checked when building locations.
+ DCHECK(!optimizations.GetDestinationIsSource()
+ || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ j(kNotEqual, &ok);
+ }
+ __ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
+ __ j(kGreater, slow_path->GetEntryLabel());
+ }
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ j(kNotEqual, &ok);
+ }
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ cmpl(src_pos.AsRegister<CpuRegister>(), Immediate(dest_pos_constant));
+ __ j(kLess, slow_path->GetEntryLabel());
+ } else {
+ __ cmpl(src_pos.AsRegister<CpuRegister>(), dest_pos.AsRegister<CpuRegister>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+ }
+
+ __ Bind(&ok);
+
+ if (!optimizations.GetSourceIsNotNull()) {
+ // Bail out if the source is null.
+ __ testl(src, src);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+ // Bail out if the destination is null.
+ __ testl(dest, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ }
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant() &&
+ !optimizations.GetCountIsSourceLength() &&
+ !optimizations.GetCountIsDestinationLength()) {
+ __ testl(length.AsRegister<CpuRegister>(), length.AsRegister<CpuRegister>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler,
+ src_pos,
+ src,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsSourceLength());
+
+ // Validity checks: dest.
+ CheckPosition(assembler,
+ dest_pos,
+ dest,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsDestinationLength());
+
+ if (!optimizations.GetDoesNotNeedTypeCheck()) {
+ // Check whether all elements of the source array are assignable to the component
+    // type of the destination array. We do two checks: the classes are the same,
+    // or the destination is Object[]. If neither check succeeds, we go to the
+    // slow path.
+ __ movl(temp1, Address(dest, class_offset));
+ __ movl(temp2, Address(src, class_offset));
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+      // One or two of the references need to be unpoisoned. Unpoison them
+      // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+      // Bail out if the destination is not a non-primitive array.
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+      // Bail out if the source is not a non-primitive array.
+ __ movl(CpuRegister(TMP), Address(temp2, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ __ cmpl(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ NearLabel do_copy;
+ __ j(kEqual, &do_copy);
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ __ movl(temp1, Address(temp1, component_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ movl(temp1, Address(temp1, super_offset));
+ // No need to unpoison the result, we're comparing against null.
+ __ testl(temp1, temp1);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+    // Bail out if the source is not a non-primitive array.
+ __ movl(temp1, Address(src, class_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Compute base source address, base destination address, and end source address.
+
+ uint32_t element_size = sizeof(int32_t);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp1, Address(src, element_size * constant + offset));
+ } else {
+ __ leal(temp1, Address(src, src_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp2, Address(dest, element_size * constant + offset));
+ } else {
+ __ leal(temp2, Address(dest, dest_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp3, Address(temp1, element_size * constant));
+ } else {
+ __ leal(temp3, Address(temp1, length.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, 0));
+ }
+
+  // Iterate over the arrays and do a raw copy of the object references. We
+  // don't need to poison/unpoison or emit read barriers, as the next uses of
+  // the destination array will do so.
+ NearLabel loop, done;
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+ __ Bind(&loop);
+ __ movl(CpuRegister(TMP), Address(temp1, 0));
+ __ movl(Address(temp2, 0), CpuRegister(TMP));
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+ __ Bind(&done);
+
+ // We only need one card marking on the destination array.
+ codegen_->MarkGCCard(temp1,
+ temp2,
+ dest,
+ CpuRegister(kNoRegister),
+ false);
+
+ __ Bind(slow_path->GetExitLabel());
+}
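+
+// In C-like pseudocode, the emitted fast path above amounts to a raw forward
+// word copy (register mapping per the comments; a sketch, not generated code):
+//
+//   uint32_t* src_cursor = src_base;   // temp1
+//   uint32_t* dst_cursor = dest_base;  // temp2
+//   while (src_cursor != src_end) {    // cmpl temp1, temp3
+//     *dst_cursor++ = *src_cursor++;   // movl through TMP, then two addl
+//   }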
+
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 24a89bca4e..ed401b67c5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -606,8 +606,23 @@ static void UpdateInputsUsers(HInstruction* instruction) {
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
- InsertInstructionBefore(replacement, initial);
- initial->ReplaceWith(replacement);
+ if (initial->IsControlFlow()) {
+ // We can only replace a control flow instruction with another control flow instruction.
+ DCHECK(replacement->IsControlFlow());
+ DCHECK_EQ(replacement->GetId(), -1);
+ DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid);
+ DCHECK_EQ(initial->GetBlock(), this);
+ DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid);
+ DCHECK(initial->GetUses().IsEmpty());
+ DCHECK(initial->GetEnvUses().IsEmpty());
+ replacement->SetBlock(this);
+ replacement->SetId(GetGraph()->GetNextInstructionId());
+ instructions_.InsertInstructionBefore(replacement, initial);
+ UpdateInputsUsers(replacement);
+ } else {
+ InsertInstructionBefore(replacement, initial);
+ initial->ReplaceWith(replacement);
+ }
RemoveInstruction(initial);
}
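
The new control-flow branch exists so a terminator can be swapped in place; a plausible (hypothetical) caller is an x86 fixup pass replacing HPackedSwitch with the pc-relative variant:

    // Hypothetical usage: both instructions are control flow, so the new
    // branch of ReplaceAndRemoveInstructionWith is taken.
    HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
        packed_switch->GetStartValue(), packed_switch->GetNumEntries(),
        packed_switch->InputAt(0), base_address, packed_switch->GetDexPc());
    block->ReplaceAndRemoveInstructionWith(packed_switch, x86_switch);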
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4cd5133e95..2426f8b08d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1085,7 +1085,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
- M(X86LoadFromConstantTable, Instruction)
+ M(X86LoadFromConstantTable, Instruction) \
+ M(X86PackedSwitch, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1648,7 +1649,8 @@ class ReferenceTypeInfo : ValueObject {
}
bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
- return IsValid() && GetTypeHandle()->IsStringClass();
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsStringClass();
}
bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -1662,15 +1664,36 @@ class ReferenceTypeInfo : ValueObject {
}
bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass();
}
+ bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsPrimitiveArray();
+ }
+
+ bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
+ }
+
bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ if (!IsExact()) return false;
+ if (!IsArrayClass()) return false;
+ if (!rti.IsArrayClass()) return false;
+ return GetTypeHandle()->GetComponentType()->IsAssignableFrom(
+ rti.GetTypeHandle()->GetComponentType());
+ }
+
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
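
The difference between the two predicates: CanArrayHold asks whether a single value of type rti can be stored, while CanArrayHoldValuesOf asks whether every element of another array can be. Hypothetical expectations, assuming exact RTIs for Object[] and String[]:

    // object_array_rti.CanArrayHoldValuesOf(string_array_rti)  -> true
    //     (Object is assignable from String)
    // string_array_rti.CanArrayHoldValuesOf(object_array_rti)  -> false
    //     (String is not assignable from Object)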
@@ -4527,7 +4550,7 @@ class HArraySet : public HTemplateInstruction<3> {
class HArrayLength : public HExpression<1> {
public:
- explicit HArrayLength(HInstruction* array, uint32_t dex_pc)
+ HArrayLength(HInstruction* array, uint32_t dex_pc)
: HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
// Note that arrays do not change length, so the instruction does not
// depend on any write.
@@ -4642,6 +4665,9 @@ class HLoadClass : public HExpression<1> {
generate_clinit_check_(false),
needs_access_check_(needs_access_check),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
+    // The referrer's class should never need an access check. We never inline
+    // unverified methods, so we cannot end up in this situation.
+ DCHECK(!is_referrers_class_ || !needs_access_check_);
SetRawInputAt(0, current_method);
}
@@ -4664,7 +4690,7 @@ class HLoadClass : public HExpression<1> {
bool NeedsEnvironment() const OVERRIDE {
// Will call runtime and load the class if the class is not loaded yet.
// TODO: finer grain decision.
- return !is_referrers_class_ || needs_access_check_;
+ return !is_referrers_class_;
}
bool MustGenerateClinitCheck() const {
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index f7cc872419..556217bf74 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -62,6 +62,45 @@ class HX86LoadFromConstantTable : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
};
+// X86 version of HPackedSwitch that holds a pointer to the base method address.
+class HX86PackedSwitch : public HTemplateInstruction<2> {
+ public:
+ HX86PackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HX86ComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(1)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(X86PackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
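
GetDefaultBlock() relies on the successor layout: entries 0 .. num_entries_ - 1 are the case targets and entry num_entries_ is the default. A hypothetical lookup helper makes that explicit:

    // Hypothetical helper: resolve the target block for a switch input value.
    HBasicBlock* TargetBlockFor(const HX86PackedSwitch* sw, int32_t value) {
      int32_t index = value - sw->GetStartValue();
      if (index >= 0 && index < sw->GetNumEntries()) {
        return sw->GetBlock()->GetSuccessors()[index];  // case target
      }
      return sw->GetDefaultBlock();  // successors[num_entries_]
    }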
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b501980dbe..c7f08066d4 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -169,13 +169,13 @@ class PassObserver : public ValueObject {
if (kIsDebugBuild) {
if (!graph_in_bad_state_) {
if (graph_->IsInSsaForm()) {
- SSAChecker checker(graph_->GetArena(), graph_);
+ SSAChecker checker(graph_);
checker.Run();
if (!checker.IsValid()) {
LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<SSAChecker>(checker);
}
} else {
- GraphChecker checker(graph_->GetArena(), graph_);
+ GraphChecker checker(graph_);
checker.Run();
if (!checker.IsValid()) {
LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 967b191d32..d59bc6be40 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -470,6 +470,13 @@ class ArmAssembler : public Assembler {
orr(rd, rn, so, cond, kCcSet);
}
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void orns(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ orn(rd, rn, so, cond, kCcSet);
+ }
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
@@ -832,6 +839,8 @@ class ArmAssembler : public Assembler {
uint32_t immediate,
ShifterOperand* shifter_op) = 0;
+ virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
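
orn computes rd = rn | ~operand. It pays off when the complement of a constant fits a modified immediate but the constant itself does not; a sketch in the style of the assembler tests:

    // 0xFFFFFF00 is not a Thumb-2 modified immediate, but its complement
    // 0xFF is, so the OR can be done in one instruction:
    __ orn(R0, R1, ShifterOperand(0xFF));  // r0 = r1 | ~0xFF = r1 | 0xFFFFFF00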
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index f7772aea3d..6e7c828b4a 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -48,6 +48,11 @@ bool Arm32Assembler::ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOpera
return false;
}
+bool Arm32Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ ShifterOperand shifter_op;
+ return ShifterOperandCanHoldArm32(immediate, &shifter_op);
+}
+
bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
Register rn ATTRIBUTE_UNUSED,
Opcode opcode ATTRIBUTE_UNUSED,
@@ -130,6 +135,15 @@ void Arm32Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
}
+void Arm32Assembler::orn(Register rd ATTRIBUTE_UNUSED,
+ Register rn ATTRIBUTE_UNUSED,
+ const ShifterOperand& so ATTRIBUTE_UNUSED,
+ Condition cond ATTRIBUTE_UNUSED,
+ SetCc set_cc ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "orn is not supported on ARM32";
+}
+
+
void Arm32Assembler::mov(Register rd, const ShifterOperand& so,
Condition cond, SetCc set_cc) {
EmitType01(cond, so.type(), MOV, set_cc, R0, rd, so);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 3407369654..4646538716 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -74,6 +74,9 @@ class Arm32Assembler FINAL : public ArmAssembler {
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
@@ -294,6 +297,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
uint32_t immediate,
ShifterOperand* shifter_op) OVERRIDE;
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 0f6c4f5a34..cc87856e82 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -390,6 +390,10 @@ void Thumb2Assembler::FinalizeCode() {
EmitLiterals();
}
+bool Thumb2Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+}
+
bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
Register rn ATTRIBUTE_UNUSED,
Opcode opcode,
@@ -410,6 +414,7 @@ bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
case MOV:
        // TODO: Support less than or equal to 12 bits.
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+
case MVN:
default:
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
@@ -492,6 +497,12 @@ void Thumb2Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
}
+void Thumb2Assembler::orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, ORN, set_cc, rn, rd, so);
+}
+
+
void Thumb2Assembler::mov(Register rd, const ShifterOperand& so,
Condition cond, SetCc set_cc) {
EmitDataProcessing(cond, MOV, set_cc, R0, rd, so);
@@ -1105,6 +1116,7 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
rn_is_valid = false; // There is no Rn for these instructions.
break;
case TEQ:
+ case ORN:
return true;
case ADD:
case SUB:
@@ -1222,6 +1234,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
case MOV: thumb_opcode = 2U /* 0b0010 */; rn = PC; break;
case BIC: thumb_opcode = 1U /* 0b0001 */; break;
case MVN: thumb_opcode = 3U /* 0b0011 */; rn = PC; break;
+ case ORN: thumb_opcode = 3U /* 0b0011 */; break;
default:
break;
}
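
The shared thumb_opcode for MVN and ORN above is deliberate: in Thumb-2, MVN is encoded as ORN with Rn = PC (0b1111), which the MVN case expresses by setting rn = PC while ORN keeps the real rn. Compare the expected encodings:

    // orn r0, r1, r2  ->  ea61 0002   (opcode 0b0011, Rn = r1)
    // mvn r0, r2      ->  ea6f 0002   (opcode 0b0011, Rn = 0b1111)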
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index a1a8927f44..055b1379ad 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -98,6 +98,9 @@ class Thumb2Assembler FINAL : public ArmAssembler {
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
@@ -337,6 +340,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
uint32_t immediate,
ShifterOperand* shifter_op) OVERRIDE;
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 6b4daed909..2060064423 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -148,7 +148,8 @@ enum Opcode {
MOV = 13, // Move
BIC = 14, // Bit Clear
MVN = 15, // Move Not
- kMaxOperand = 16
+ ORN = 16, // Logical OR NOT.
+ kMaxOperand = 17
};
std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 496ca95ff9..b01b0fe4e0 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -122,7 +122,8 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() {
this->AdvancePC(assembler_->CodeSize());
}
-Assembler* Assembler::Create(InstructionSet instruction_set) {
+Assembler* Assembler::Create(InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
@@ -136,7 +137,9 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return new mips::MipsAssembler();
+ return new mips::MipsAssembler(instruction_set_features != nullptr
+ ? instruction_set_features->AsMipsInstructionSetFeatures()
+ : nullptr);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 1088cb1bbd..f1c0b9258a 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -20,6 +20,7 @@
#include <vector>
#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
@@ -284,7 +285,8 @@ class DebugFrameOpCodeWriterForAssembler FINAL
class Assembler {
public:
- static Assembler* Create(InstructionSet instruction_set);
+ static Assembler* Create(InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features = nullptr);
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
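
The defaulted parameter keeps every existing call site compiling unchanged while letting MIPS forward its ISA features; a sketch (the features pointer is an assumption about the caller):

    // Existing callers are untouched:
    Assembler* thumb2_assembler = Assembler::Create(kThumb2);
    // MIPS callers can now pass features, e.g. to pick R2 vs. R6 encodings:
    Assembler* mips_assembler = Assembler::Create(kMips, features);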
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index bd994f46fc..b30f7d772e 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -129,13 +129,14 @@ class AssemblerTest : public testing::Test {
return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
}
- template <typename Reg1Type, typename Reg2Type, typename ImmType,
- RegisterView Reg1View, RegisterView Reg2View>
- std::string RepeatRegRegImmBits(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
- int imm_bits,
- std::string fmt) {
- const std::vector<Reg1Type*> reg1_registers = GetRegisters();
- const std::vector<Reg2Type*> reg2_registers = GetRegisters();
+ template <typename Reg1, typename Reg2, typename ImmType>
+ std::string RepeatTemplatedRegistersImmBits(void (Ass::*f)(Reg1, Reg2, ImmType),
+ int imm_bits,
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
std::string str;
std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0);
@@ -146,13 +147,13 @@ class AssemblerTest : public testing::Test {
(assembler_.get()->*f)(*reg1, *reg2, new_imm);
std::string base = fmt;
- std::string reg1_string = GetRegName<Reg1View>(*reg1);
+ std::string reg1_string = (this->*GetName1)(*reg1);
size_t reg1_index;
while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
}
- std::string reg2_string = GetRegName<Reg2View>(*reg2);
+ std::string reg2_string = (this->*GetName2)(*reg2);
size_t reg2_index;
while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
@@ -178,15 +179,75 @@ class AssemblerTest : public testing::Test {
return str;
}
- template <typename Reg1Type, typename Reg2Type, typename ImmType>
- std::string RepeatRRIb(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
- int imm_bits,
- std::string fmt) {
- return RepeatRegRegImmBits<Reg1Type,
- Reg2Type,
- ImmType,
- RegisterView::kUsePrimaryName,
- RegisterView::kUsePrimaryName>(f, imm_bits, fmt);
+ template <typename RegType, typename ImmType>
+ std::string RepeatTemplatedRegisterImmBits(void (Ass::*f)(RegType, ImmType),
+ int imm_bits,
+ const std::vector<Reg*> registers,
+ std::string (AssemblerTest::*GetName)(const RegType&),
+ std::string fmt) {
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0);
+
+ for (auto reg : registers) {
+ for (int64_t imm : imms) {
+ ImmType new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg, new_imm);
+ std::string base = fmt;
+
+ std::string reg_string = (this->*GetName)(*reg);
+ size_t reg_index;
+ while ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
+ template <typename ImmType>
+ std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegistersImmBits<Reg, Reg, ImmType>(f,
+ imm_bits,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename ImmType>
+ std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegisterImmBits<Reg, ImmType>(f,
+ imm_bits,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename ImmType>
+ std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegistersImmBits<FPReg, Reg, ImmType>(f,
+ imm_bits,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
}
std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
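
A negative imm_bits apparently requests signed immediates: CreateImmediateValuesBits receives abs(imm_bits) and (imm_bits > 0) as the unsigned flag. A hypothetical use from a MIPS assembler test (the exact format string is an assumption):

    // Exercise Addiu over all register pairs and signed 16-bit immediates,
    // then compare against the expected disassembly.
    TEST_F(AssemblerMIPSTest, Addiu) {
      DriverStr(RepeatRRIb(&mips::MipsAssembler::Addiu, -16,
                           "addiu ${reg1}, ${reg2}, {imm}"),
                "Addiu");
    }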
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index b2a354b63c..2ae88413e7 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -238,6 +238,7 @@ TEST(Thumb2AssemblerTest, DataProcessingRegister) {
__ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
@@ -371,6 +372,7 @@ TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
__ sub(R0, R1, ShifterOperand(0x55));
__ and_(R0, R1, ShifterOperand(0x55));
__ orr(R0, R1, ShifterOperand(0x55));
+ __ orn(R0, R1, ShifterOperand(0x55));
__ eor(R0, R1, ShifterOperand(0x55));
__ bic(R0, R1, ShifterOperand(0x55));
__ adc(R0, R1, ShifterOperand(0x55));
@@ -403,6 +405,7 @@ TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
__ sub(R0, R1, ShifterOperand(0x550055));
__ and_(R0, R1, ShifterOperand(0x550055));
__ orr(R0, R1, ShifterOperand(0x550055));
+ __ orn(R0, R1, ShifterOperand(0x550055));
__ eor(R0, R1, ShifterOperand(0x550055));
__ bic(R0, R1, ShifterOperand(0x550055));
__ adc(R0, R1, ShifterOperand(0x550055));
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 82ad6429bf..b79c2e46f0 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -23,109 +23,110 @@ const char* DataProcessingRegisterResults[] = {
" 8: eba1 0002 sub.w r0, r1, r2\n",
" c: ea01 0002 and.w r0, r1, r2\n",
" 10: ea41 0002 orr.w r0, r1, r2\n",
- " 14: ea81 0002 eor.w r0, r1, r2\n",
- " 18: ea21 0002 bic.w r0, r1, r2\n",
- " 1c: eb41 0002 adc.w r0, r1, r2\n",
- " 20: eb61 0002 sbc.w r0, r1, r2\n",
- " 24: ebc1 0002 rsb r0, r1, r2\n",
- " 28: ea90 0f01 teq r0, r1\n",
- " 2c: 0008 movs r0, r1\n",
- " 2e: 4608 mov r0, r1\n",
- " 30: 43c8 mvns r0, r1\n",
- " 32: 4408 add r0, r1\n",
- " 34: 1888 adds r0, r1, r2\n",
- " 36: 1a88 subs r0, r1, r2\n",
- " 38: 4148 adcs r0, r1\n",
- " 3a: 4188 sbcs r0, r1\n",
- " 3c: 4008 ands r0, r1\n",
- " 3e: 4308 orrs r0, r1\n",
- " 40: 4048 eors r0, r1\n",
- " 42: 4388 bics r0, r1\n",
- " 44: 4208 tst r0, r1\n",
- " 46: 4288 cmp r0, r1\n",
- " 48: 42c8 cmn r0, r1\n",
- " 4a: 4641 mov r1, r8\n",
- " 4c: 4681 mov r9, r0\n",
- " 4e: 46c8 mov r8, r9\n",
- " 50: 4441 add r1, r8\n",
- " 52: 4481 add r9, r0\n",
- " 54: 44c8 add r8, r9\n",
- " 56: 4548 cmp r0, r9\n",
- " 58: 4588 cmp r8, r1\n",
- " 5a: 45c1 cmp r9, r8\n",
- " 5c: 4248 negs r0, r1\n",
- " 5e: 4240 negs r0, r0\n",
- " 60: ea5f 0008 movs.w r0, r8\n",
- " 64: ea7f 0008 mvns.w r0, r8\n",
- " 68: eb01 0008 add.w r0, r1, r8\n",
- " 6c: eb11 0008 adds.w r0, r1, r8\n",
- " 70: ebb1 0008 subs.w r0, r1, r8\n",
- " 74: eb50 0008 adcs.w r0, r0, r8\n",
- " 78: eb70 0008 sbcs.w r0, r0, r8\n",
- " 7c: ea10 0008 ands.w r0, r0, r8\n",
- " 80: ea50 0008 orrs.w r0, r0, r8\n",
- " 84: ea90 0008 eors.w r0, r0, r8\n",
- " 88: ea30 0008 bics.w r0, r0, r8\n",
- " 8c: ea10 0f08 tst.w r0, r8\n",
- " 90: eb10 0f08 cmn.w r0, r8\n",
- " 94: f1d8 0000 rsbs r0, r8, #0\n",
- " 98: f1d8 0800 rsbs r8, r8, #0\n",
- " 9c: bf08 it eq\n",
- " 9e: ea7f 0001 mvnseq.w r0, r1\n",
- " a2: bf08 it eq\n",
- " a4: eb11 0002 addseq.w r0, r1, r2\n",
- " a8: bf08 it eq\n",
- " aa: ebb1 0002 subseq.w r0, r1, r2\n",
- " ae: bf08 it eq\n",
- " b0: eb50 0001 adcseq.w r0, r0, r1\n",
- " b4: bf08 it eq\n",
- " b6: eb70 0001 sbcseq.w r0, r0, r1\n",
- " ba: bf08 it eq\n",
- " bc: ea10 0001 andseq.w r0, r0, r1\n",
- " c0: bf08 it eq\n",
- " c2: ea50 0001 orrseq.w r0, r0, r1\n",
- " c6: bf08 it eq\n",
- " c8: ea90 0001 eorseq.w r0, r0, r1\n",
- " cc: bf08 it eq\n",
- " ce: ea30 0001 bicseq.w r0, r0, r1\n",
- " d2: bf08 it eq\n",
- " d4: 43c8 mvneq r0, r1\n",
+ " 14: ea61 0002 orn r0, r1, r2\n",
+ " 18: ea81 0002 eor.w r0, r1, r2\n",
+ " 1c: ea21 0002 bic.w r0, r1, r2\n",
+ " 20: eb41 0002 adc.w r0, r1, r2\n",
+ " 24: eb61 0002 sbc.w r0, r1, r2\n",
+ " 28: ebc1 0002 rsb r0, r1, r2\n",
+ " 2c: ea90 0f01 teq r0, r1\n",
+ " 30: 0008 movs r0, r1\n",
+ " 32: 4608 mov r0, r1\n",
+ " 34: 43c8 mvns r0, r1\n",
+ " 36: 4408 add r0, r1\n",
+ " 38: 1888 adds r0, r1, r2\n",
+ " 3a: 1a88 subs r0, r1, r2\n",
+ " 3c: 4148 adcs r0, r1\n",
+ " 3e: 4188 sbcs r0, r1\n",
+ " 40: 4008 ands r0, r1\n",
+ " 42: 4308 orrs r0, r1\n",
+ " 44: 4048 eors r0, r1\n",
+ " 46: 4388 bics r0, r1\n",
+ " 48: 4208 tst r0, r1\n",
+ " 4a: 4288 cmp r0, r1\n",
+ " 4c: 42c8 cmn r0, r1\n",
+ " 4e: 4641 mov r1, r8\n",
+ " 50: 4681 mov r9, r0\n",
+ " 52: 46c8 mov r8, r9\n",
+ " 54: 4441 add r1, r8\n",
+ " 56: 4481 add r9, r0\n",
+ " 58: 44c8 add r8, r9\n",
+ " 5a: 4548 cmp r0, r9\n",
+ " 5c: 4588 cmp r8, r1\n",
+ " 5e: 45c1 cmp r9, r8\n",
+ " 60: 4248 negs r0, r1\n",
+ " 62: 4240 negs r0, r0\n",
+ " 64: ea5f 0008 movs.w r0, r8\n",
+ " 68: ea7f 0008 mvns.w r0, r8\n",
+ " 6c: eb01 0008 add.w r0, r1, r8\n",
+ " 70: eb11 0008 adds.w r0, r1, r8\n",
+ " 74: ebb1 0008 subs.w r0, r1, r8\n",
+ " 78: eb50 0008 adcs.w r0, r0, r8\n",
+ " 7c: eb70 0008 sbcs.w r0, r0, r8\n",
+ " 80: ea10 0008 ands.w r0, r0, r8\n",
+ " 84: ea50 0008 orrs.w r0, r0, r8\n",
+ " 88: ea90 0008 eors.w r0, r0, r8\n",
+ " 8c: ea30 0008 bics.w r0, r0, r8\n",
+ " 90: ea10 0f08 tst.w r0, r8\n",
+ " 94: eb10 0f08 cmn.w r0, r8\n",
+ " 98: f1d8 0000 rsbs r0, r8, #0\n",
+ " 9c: f1d8 0800 rsbs r8, r8, #0\n",
+ " a0: bf08 it eq\n",
+ " a2: ea7f 0001 mvnseq.w r0, r1\n",
+ " a6: bf08 it eq\n",
+ " a8: eb11 0002 addseq.w r0, r1, r2\n",
+ " ac: bf08 it eq\n",
+ " ae: ebb1 0002 subseq.w r0, r1, r2\n",
+ " b2: bf08 it eq\n",
+ " b4: eb50 0001 adcseq.w r0, r0, r1\n",
+ " b8: bf08 it eq\n",
+ " ba: eb70 0001 sbcseq.w r0, r0, r1\n",
+ " be: bf08 it eq\n",
+ " c0: ea10 0001 andseq.w r0, r0, r1\n",
+ " c4: bf08 it eq\n",
+ " c6: ea50 0001 orrseq.w r0, r0, r1\n",
+ " ca: bf08 it eq\n",
+ " cc: ea90 0001 eorseq.w r0, r0, r1\n",
+ " d0: bf08 it eq\n",
+ " d2: ea30 0001 bicseq.w r0, r0, r1\n",
" d6: bf08 it eq\n",
- " d8: 1888 addeq r0, r1, r2\n",
+ " d8: 43c8 mvneq r0, r1\n",
" da: bf08 it eq\n",
- " dc: 1a88 subeq r0, r1, r2\n",
+ " dc: 1888 addeq r0, r1, r2\n",
" de: bf08 it eq\n",
- " e0: 4148 adceq r0, r1\n",
+ " e0: 1a88 subeq r0, r1, r2\n",
" e2: bf08 it eq\n",
- " e4: 4188 sbceq r0, r1\n",
+ " e4: 4148 adceq r0, r1\n",
" e6: bf08 it eq\n",
- " e8: 4008 andeq r0, r1\n",
+ " e8: 4188 sbceq r0, r1\n",
" ea: bf08 it eq\n",
- " ec: 4308 orreq r0, r1\n",
+ " ec: 4008 andeq r0, r1\n",
" ee: bf08 it eq\n",
- " f0: 4048 eoreq r0, r1\n",
+ " f0: 4308 orreq r0, r1\n",
" f2: bf08 it eq\n",
- " f4: 4388 biceq r0, r1\n",
- " f6: 4608 mov r0, r1\n",
- " f8: 43c8 mvns r0, r1\n",
- " fa: 4408 add r0, r1\n",
- " fc: 1888 adds r0, r1, r2\n",
- " fe: 1a88 subs r0, r1, r2\n",
- " 100: 4148 adcs r0, r1\n",
- " 102: 4188 sbcs r0, r1\n",
- " 104: 4008 ands r0, r1\n",
- " 106: 4308 orrs r0, r1\n",
- " 108: 4048 eors r0, r1\n",
- " 10a: 4388 bics r0, r1\n",
- " 10c: 4641 mov r1, r8\n",
- " 10e: 4681 mov r9, r0\n",
- " 110: 46c8 mov r8, r9\n",
- " 112: 4441 add r1, r8\n",
- " 114: 4481 add r9, r0\n",
- " 116: 44c8 add r8, r9\n",
- " 118: 4248 negs r0, r1\n",
- " 11a: 4240 negs r0, r0\n",
- " 11c: eb01 0c00 add.w ip, r1, r0\n",
+ " f4: 4048 eoreq r0, r1\n",
+ " f6: bf08 it eq\n",
+ " f8: 4388 biceq r0, r1\n",
+ " fa: 4608 mov r0, r1\n",
+ " fc: 43c8 mvns r0, r1\n",
+ " fe: 4408 add r0, r1\n",
+ " 100: 1888 adds r0, r1, r2\n",
+ " 102: 1a88 subs r0, r1, r2\n",
+ " 104: 4148 adcs r0, r1\n",
+ " 106: 4188 sbcs r0, r1\n",
+ " 108: 4008 ands r0, r1\n",
+ " 10a: 4308 orrs r0, r1\n",
+ " 10c: 4048 eors r0, r1\n",
+ " 10e: 4388 bics r0, r1\n",
+ " 110: 4641 mov r1, r8\n",
+ " 112: 4681 mov r9, r0\n",
+ " 114: 46c8 mov r8, r9\n",
+ " 116: 4441 add r1, r8\n",
+ " 118: 4481 add r9, r0\n",
+ " 11a: 44c8 add r8, r9\n",
+ " 11c: 4248 negs r0, r1\n",
+ " 11e: 4240 negs r0, r0\n",
+ " 120: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
const char* DataProcessingImmediateResults[] = {
@@ -135,21 +136,22 @@ const char* DataProcessingImmediateResults[] = {
" a: f2a1 0055 subw r0, r1, #85 ; 0x55\n",
" e: f001 0055 and.w r0, r1, #85 ; 0x55\n",
" 12: f041 0055 orr.w r0, r1, #85 ; 0x55\n",
- " 16: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
- " 1a: f021 0055 bic.w r0, r1, #85 ; 0x55\n",
- " 1e: f141 0055 adc.w r0, r1, #85 ; 0x55\n",
- " 22: f161 0055 sbc.w r0, r1, #85 ; 0x55\n",
- " 26: f1c1 0055 rsb r0, r1, #85 ; 0x55\n",
- " 2a: f010 0f55 tst.w r0, #85 ; 0x55\n",
- " 2e: f090 0f55 teq r0, #85 ; 0x55\n",
- " 32: 2855 cmp r0, #85 ; 0x55\n",
- " 34: f110 0f55 cmn.w r0, #85 ; 0x55\n",
- " 38: 1d48 adds r0, r1, #5\n",
- " 3a: 1f48 subs r0, r1, #5\n",
- " 3c: 2055 movs r0, #85 ; 0x55\n",
- " 3e: f07f 0055 mvns.w r0, #85 ; 0x55\n",
- " 42: 1d48 adds r0, r1, #5\n",
- " 44: 1f48 subs r0, r1, #5\n",
+ " 16: f061 0055 orn r0, r1, #85 ; 0x55\n",
+ " 1a: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
+ " 1e: f021 0055 bic.w r0, r1, #85 ; 0x55\n",
+ " 22: f141 0055 adc.w r0, r1, #85 ; 0x55\n",
+ " 26: f161 0055 sbc.w r0, r1, #85 ; 0x55\n",
+ " 2a: f1c1 0055 rsb r0, r1, #85 ; 0x55\n",
+ " 2e: f010 0f55 tst.w r0, #85 ; 0x55\n",
+ " 32: f090 0f55 teq r0, #85 ; 0x55\n",
+ " 36: 2855 cmp r0, #85 ; 0x55\n",
+ " 38: f110 0f55 cmn.w r0, #85 ; 0x55\n",
+ " 3c: 1d48 adds r0, r1, #5\n",
+ " 3e: 1f48 subs r0, r1, #5\n",
+ " 40: 2055 movs r0, #85 ; 0x55\n",
+ " 42: f07f 0055 mvns.w r0, #85 ; 0x55\n",
+ " 46: 1d48 adds r0, r1, #5\n",
+ " 48: 1f48 subs r0, r1, #5\n",
nullptr
};
const char* DataProcessingModifiedImmediateResults[] = {
@@ -159,15 +161,16 @@ const char* DataProcessingModifiedImmediateResults[] = {
" c: f1a1 1055 sub.w r0, r1, #5570645 ; 0x550055\n",
" 10: f001 1055 and.w r0, r1, #5570645 ; 0x550055\n",
" 14: f041 1055 orr.w r0, r1, #5570645 ; 0x550055\n",
- " 18: f081 1055 eor.w r0, r1, #5570645 ; 0x550055\n",
- " 1c: f021 1055 bic.w r0, r1, #5570645 ; 0x550055\n",
- " 20: f141 1055 adc.w r0, r1, #5570645 ; 0x550055\n",
- " 24: f161 1055 sbc.w r0, r1, #5570645 ; 0x550055\n",
- " 28: f1c1 1055 rsb r0, r1, #5570645 ; 0x550055\n",
- " 2c: f010 1f55 tst.w r0, #5570645 ; 0x550055\n",
- " 30: f090 1f55 teq r0, #5570645 ; 0x550055\n",
- " 34: f1b0 1f55 cmp.w r0, #5570645 ; 0x550055\n",
- " 38: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
+ " 18: f061 1055 orn r0, r1, #5570645 ; 0x550055\n",
+ " 1c: f081 1055 eor.w r0, r1, #5570645 ; 0x550055\n",
+ " 20: f021 1055 bic.w r0, r1, #5570645 ; 0x550055\n",
+ " 24: f141 1055 adc.w r0, r1, #5570645 ; 0x550055\n",
+ " 28: f161 1055 sbc.w r0, r1, #5570645 ; 0x550055\n",
+ " 2c: f1c1 1055 rsb r0, r1, #5570645 ; 0x550055\n",
+ " 30: f010 1f55 tst.w r0, #5570645 ; 0x550055\n",
+ " 34: f090 1f55 teq r0, #5570645 ; 0x550055\n",
+ " 38: f1b0 1f55 cmp.w r0, #5570645 ; 0x550055\n",
+ " 3c: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
nullptr
};
const char* DataProcessingModifiedImmediatesResults[] = {
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index ff4a1a4333..1038f44ffe 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -70,6 +70,13 @@ class Label {
public:
Label() : position_(0) {}
+ Label(Label&& src)
+ : position_(src.position_) {
+    // We must unlink/unbind the src label when moving; otherwise the checks in
+    // the destructor of the src label would fail.
+ src.position_ = 0;
+ }
+
~Label() {
// Assert if label is being destroyed with unresolved branches pending.
CHECK(!IsLinked());
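
The move constructor exists so labels can be held by value in containers that relocate their elements; a minimal sketch of the intended use:

    // Hypothetical usage: a growable container of labels. On reallocation the
    // vector moves its elements; without the move constructor, destroying a
    // still-linked source Label would trip the checks above.
    std::vector<Label> labels;
    labels.emplace_back();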
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c5fae92f3c..6f35e9ef59 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -19,6 +19,7 @@
#include "base/bit_utils.h"
#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "memory_region.h"
#include "thread.h"
@@ -34,170 +35,191 @@ std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
return os;
}
-void MipsAssembler::Emit(int32_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int32_t>(value);
+void MipsAssembler::FinalizeCode() {
+ for (auto& exception_block : exception_blocks_) {
+ EmitExceptionPoll(&exception_block);
+ }
+ PromoteBranches();
+}
+
+void MipsAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ EmitBranches();
+ Assembler::FinalizeInstructions(region);
+}
+
+void MipsAssembler::EmitBranches() {
+ CHECK(!overwriting_);
+ // Switch from appending instructions at the end of the buffer to overwriting
+ // existing instructions (branch placeholders) in the buffer.
+ overwriting_ = true;
+ for (auto& branch : branches_) {
+ EmitBranch(&branch);
+ }
+ overwriting_ = false;
+}
+
+void MipsAssembler::Emit(uint32_t value) {
+ if (overwriting_) {
+ // Branches to labels are emitted into their placeholders here.
+ buffer_.Store<uint32_t>(overwrite_location_, value);
+ overwrite_location_ += sizeof(uint32_t);
+ } else {
+ // Other instructions are simply appended at the end here.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<uint32_t>(value);
+ }
}
void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
CHECK_NE(rd, kNoRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- static_cast<int32_t>(rd) << kRdShift |
- shamt << kShamtShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ static_cast<uint32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
Emit(encoding);
}
void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitI21(int opcode, Register rs, uint32_t imm21) {
+ CHECK_NE(rs, kNoRegister);
+ CHECK(IsUint<21>(imm21)) << imm21;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ imm21;
Emit(encoding);
}
-void MipsAssembler::EmitJ(int opcode, int address) {
- int32_t encoding = opcode << kOpcodeShift |
- address;
+void MipsAssembler::EmitI26(int opcode, uint32_t imm26) {
+ CHECK(IsUint<26>(imm26)) << imm26;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
Emit(encoding);
}
-void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) {
+void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd,
+ int funct) {
CHECK_NE(ft, kNoFRegister);
CHECK_NE(fs, kNoFRegister);
CHECK_NE(fd, kNoFRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(ft) << kFtShift |
- static_cast<int32_t>(fs) << kFsShift |
- static_cast<int32_t>(fd) << kFdShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ static_cast<uint32_t>(fs) << kFsShift |
+ static_cast<uint32_t>(fd) << kFdShift |
+ funct;
Emit(encoding);
}
-void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) {
- CHECK_NE(rt, kNoFRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+void MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm) {
+ CHECK_NE(ft, kNoFRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ imm;
Emit(encoding);
}
-void MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the branch instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (equal) {
- Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
- } else {
- Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
- }
+void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x21);
}
-void MipsAssembler::EmitJump(Label* label, bool link) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the jump instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (link) {
- Jal((offset >> 2) & kJumpOffsetMask);
- } else {
- J((offset >> 2) & kJumpOffsetMask);
- }
+void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x9, rs, rt, imm16);
}
-int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
- CHECK_ALIGNED(offset, 4);
- CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
+void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x23);
+}
- // Properly preserve only the bits supported in the instruction.
- offset >>= 2;
- if (is_jump) {
- offset &= kJumpOffsetMask;
- return (inst & ~kJumpOffsetMask) | offset;
- } else {
- offset &= kBranchOffsetMask;
- return (inst & ~kBranchOffsetMask) | offset;
- }
+void MipsAssembler::MultR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
}
-int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
- // Sign-extend, then left-shift by 2.
- if (is_jump) {
- return (((inst & kJumpOffsetMask) << 6) >> 4);
- } else {
- return (((inst & kBranchOffsetMask) << 16) >> 14);
- }
+void MipsAssembler::MultuR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
}
-void MipsAssembler::Bind(Label* label, bool is_jump) {
- CHECK(!label->IsBound());
- int bound_pc = buffer_.Size();
- while (label->IsLinked()) {
- int32_t position = label->Position();
- int32_t next = buffer_.Load<int32_t>(position);
- int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
- int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump);
- buffer_.Store<int32_t>(position, encoded);
- label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump);
- }
- label->BindTo(bound_pc);
+void MipsAssembler::DivR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
}
-void MipsAssembler::Add(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x20);
+void MipsAssembler::DivuR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
}
-void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x21);
+void MipsAssembler::MulR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0x1c, rs, rt, rd, 0, 2);
}
-void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x8, rs, rt, imm16);
+void MipsAssembler::DivR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivR2(rs, rt);
+ Mflo(rd);
}
-void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x9, rs, rt, imm16);
+void MipsAssembler::ModR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivR2(rs, rt);
+ Mfhi(rd);
}
-void MipsAssembler::Sub(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x22);
+void MipsAssembler::DivuR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivuR2(rs, rt);
+ Mflo(rd);
}
-void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x23);
+void MipsAssembler::ModuR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivuR2(rs, rt);
+ Mfhi(rd);
}
-void MipsAssembler::Mult(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+void MipsAssembler::MulR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x18);
}
-void MipsAssembler::Multu(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+void MipsAssembler::MuhuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x19);
}
-void MipsAssembler::Div(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+void MipsAssembler::DivR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x1a);
}
-void MipsAssembler::Divu(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+void MipsAssembler::ModR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x1a);
+}
+
+void MipsAssembler::DivuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x1b);
+}
+
+void MipsAssembler::ModuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x1b);
}
void MipsAssembler::And(Register rd, Register rs, Register rt) {
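
The R2/R6 split mirrors the ISA: pre-R6 multiply/divide write the HI/LO pair and need mfhi/mflo to fetch results, while R6 drops HI/LO in favor of three-operand encodings. Roughly:

    // Pre-R6: results go through HI/LO.
    //   div  $s0, $s1        -> LO = s0 / s1, HI = s0 % s1
    //   mflo $v0             -> quotient (ModR2 uses mfhi for the remainder)
    // R6: direct three-operand forms, no HI/LO.
    //   div  $v0, $s0, $s1   -> v0 = s0 / s1
    //   mod  $v0, $s0, $s1   -> v0 = s0 % s1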
@@ -228,27 +250,35 @@ void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
EmitR(0, rs, rt, rd, 0, 0x27);
}
-void MipsAssembler::Sll(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x00);
+void MipsAssembler::Seb(Register rd, Register rt) {
+ EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x10, 0x20);
}
-void MipsAssembler::Srl(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x02);
+void MipsAssembler::Seh(Register rd, Register rt) {
+ EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x18, 0x20);
}
-void MipsAssembler::Sra(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x03);
+void MipsAssembler::Sll(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x00);
}
-void MipsAssembler::Sllv(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srl(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x02);
+}
+
+void MipsAssembler::Sra(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x03);
+}
+
+void MipsAssembler::Sllv(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x04);
}
-void MipsAssembler::Srlv(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srlv(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x06);
}
-void MipsAssembler::Srav(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srav(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x07);
}
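
These operand-order fixes match the MIPS R-type encoding: immediate shifts take their source in rt with rs hardwired to zero, and variable shifts take the shift amount in rs. Field layout for reference:

    // sll  rd, rt, shamt: 000000 | 00000 | rt | rd | shamt | 000000
    // sllv rd, rt, rs:    000000 | rs    | rt | rd | 00000 | 000100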
@@ -276,11 +306,18 @@ void MipsAssembler::Lui(Register rt, uint16_t imm16) {
EmitI(0xf, static_cast<Register>(0), rt, imm16);
}
+void MipsAssembler::Sync(uint32_t stype) {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0),
+ stype & 0x1f, 0xf);
+}
+
void MipsAssembler::Mfhi(Register rd) {
+ CHECK(!IsR6());
EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
}
void MipsAssembler::Mflo(Register rd) {
+ CHECK(!IsR6());
EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
}
@@ -312,34 +349,276 @@ void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
EmitI(0xb, rs, rt, imm16);
}
-void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) {
+void MipsAssembler::B(uint16_t imm16) {
+ EmitI(0x4, static_cast<Register>(0), static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Beq(Register rs, Register rt, uint16_t imm16) {
EmitI(0x4, rs, rt, imm16);
- Nop();
}
-void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) {
+void MipsAssembler::Bne(Register rs, Register rt, uint16_t imm16) {
EmitI(0x5, rs, rt, imm16);
- Nop();
}
-void MipsAssembler::J(uint32_t address) {
- EmitJ(0x2, address);
- Nop();
+void MipsAssembler::Beqz(Register rt, uint16_t imm16) {
+ Beq(ZERO, rt, imm16);
}
-void MipsAssembler::Jal(uint32_t address) {
- EmitJ(0x2, address);
- Nop();
+void MipsAssembler::Bnez(Register rt, uint16_t imm16) {
+ Bne(ZERO, rt, imm16);
}
-void MipsAssembler::Jr(Register rs) {
- EmitR(0, rs, static_cast<Register>(0), static_cast<Register>(0), 0, 0x09); // Jalr zero, rs
- Nop();
+void MipsAssembler::Bltz(Register rt, uint16_t imm16) {
+ EmitI(0x1, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Bgez(Register rt, uint16_t imm16) {
+ EmitI(0x1, rt, static_cast<Register>(0x1), imm16);
+}
+
+void MipsAssembler::Blez(Register rt, uint16_t imm16) {
+ EmitI(0x6, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Bgtz(Register rt, uint16_t imm16) {
+ EmitI(0x7, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::J(uint32_t addr26) {
+ EmitI26(0x2, addr26);
+}
+
+void MipsAssembler::Jal(uint32_t addr26) {
+ EmitI26(0x3, addr26);
+}
+
+void MipsAssembler::Jalr(Register rd, Register rs) {
+ EmitR(0, rs, static_cast<Register>(0), rd, 0, 0x09);
}
void MipsAssembler::Jalr(Register rs) {
- EmitR(0, rs, static_cast<Register>(0), RA, 0, 0x09);
- Nop();
+ Jalr(RA, rs);
+}
+
+void MipsAssembler::Jr(Register rs) {
+ Jalr(ZERO, rs);
+}
+
+void MipsAssembler::Nal() {
+ EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x10), 0);
+}
+
+void MipsAssembler::Auipc(Register rs, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x3B, rs, static_cast<Register>(0x1E), imm16);
+}
+
+void MipsAssembler::Addiupc(Register rs, uint32_t imm19) {
+ CHECK(IsR6());
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, imm19);
+}
+
+void MipsAssembler::Bc(uint32_t imm26) {
+ CHECK(IsR6());
+ EmitI26(0x32, imm26);
+}
+
+void MipsAssembler::Jic(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x36, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Jialc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x3E, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bltc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x17, rs, rt, imm16);
+}
+
+void MipsAssembler::Bltzc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, rt, rt, imm16);
+}
+
+void MipsAssembler::Bgtzc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bgec(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x16, rs, rt, imm16);
+}
+
+void MipsAssembler::Bgezc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, rt, rt, imm16);
+}
+
+void MipsAssembler::Blezc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bltuc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x7, rs, rt, imm16);
+}
+
+void MipsAssembler::Bgeuc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x6, rs, rt, imm16);
+}
+
+void MipsAssembler::Beqc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
+}
+
+void MipsAssembler::Bnec(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
+}
+
+void MipsAssembler::Beqzc(Register rs, uint32_t imm21) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x36, rs, imm21);
+}
+
+void MipsAssembler::Bnezc(Register rs, uint32_t imm21) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x3E, rs, imm21);
+}
+
+void MipsAssembler::EmitBcond(BranchCondition cond, Register rs, Register rt, uint16_t imm16) {
+ switch (cond) {
+ case kCondLTZ:
+ CHECK_EQ(rt, ZERO);
+ Bltz(rs, imm16);
+ break;
+ case kCondGEZ:
+ CHECK_EQ(rt, ZERO);
+ Bgez(rs, imm16);
+ break;
+ case kCondLEZ:
+ CHECK_EQ(rt, ZERO);
+ Blez(rs, imm16);
+ break;
+ case kCondGTZ:
+ CHECK_EQ(rt, ZERO);
+ Bgtz(rs, imm16);
+ break;
+ case kCondEQ:
+ Beq(rs, rt, imm16);
+ break;
+ case kCondNE:
+ Bne(rs, rt, imm16);
+ break;
+ case kCondEQZ:
+ CHECK_EQ(rt, ZERO);
+ Beqz(rs, imm16);
+ break;
+ case kCondNEZ:
+ CHECK_EQ(rt, ZERO);
+ Bnez(rs, imm16);
+ break;
+ case kCondLT:
+ case kCondGE:
+ case kCondLE:
+ case kCondGT:
+ case kCondLTU:
+ case kCondGEU:
+ case kUncond:
+ // We don't support synthetic R2 branches (preceded with slt[u]) at this level
+ // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ UNREACHABLE();
+ }
+}
+
+void MipsAssembler::EmitBcondc(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21) {
+ switch (cond) {
+ case kCondLT:
+ Bltc(rs, rt, imm16_21);
+ break;
+ case kCondGE:
+ Bgec(rs, rt, imm16_21);
+ break;
+ case kCondLE:
+ Bgec(rt, rs, imm16_21);
+ break;
+ case kCondGT:
+ Bltc(rt, rs, imm16_21);
+ break;
+ case kCondLTZ:
+ CHECK_EQ(rt, ZERO);
+ Bltzc(rs, imm16_21);
+ break;
+ case kCondGEZ:
+ CHECK_EQ(rt, ZERO);
+ Bgezc(rs, imm16_21);
+ break;
+ case kCondLEZ:
+ CHECK_EQ(rt, ZERO);
+ Blezc(rs, imm16_21);
+ break;
+ case kCondGTZ:
+ CHECK_EQ(rt, ZERO);
+ Bgtzc(rs, imm16_21);
+ break;
+ case kCondEQ:
+ Beqc(rs, rt, imm16_21);
+ break;
+ case kCondNE:
+ Bnec(rs, rt, imm16_21);
+ break;
+ case kCondEQZ:
+ CHECK_EQ(rt, ZERO);
+ Beqzc(rs, imm16_21);
+ break;
+ case kCondNEZ:
+ CHECK_EQ(rt, ZERO);
+ Bnezc(rs, imm16_21);
+ break;
+ case kCondLTU:
+ Bltuc(rs, rt, imm16_21);
+ break;
+ case kCondGEU:
+ Bgeuc(rs, rt, imm16_21);
+ break;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ UNREACHABLE();
+ }
}
void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
@@ -358,52 +637,84 @@ void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}
-void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x0);
+void MipsAssembler::AddD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}
-void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x1);
+void MipsAssembler::SubD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}
-void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x2);
+void MipsAssembler::MulD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}
-void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x3);
+void MipsAssembler::DivD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}
void MipsAssembler::MovS(FRegister fd, FRegister fs) {
EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
}
-void MipsAssembler::MovD(DRegister fd, DRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x6);
+void MipsAssembler::MovD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x6);
+}
+
+void MipsAssembler::NegS(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x7);
+}
+
+void MipsAssembler::NegD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7);
+}
+
+void MipsAssembler::Cvtsw(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20);
+}
+
+void MipsAssembler::Cvtdw(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x21);
+}
+
+void MipsAssembler::Cvtsd(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x20);
+}
+
+void MipsAssembler::Cvtds(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21);
}
void MipsAssembler::Mfc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x00, ConvertRegToFReg(rt), fs, static_cast<FRegister>(0), 0x0);
+ EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mtc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
-void MipsAssembler::Mtc1(FRegister ft, Register rs) {
- EmitFR(0x11, 0x04, ft, ConvertRegToFReg(rs), static_cast<FRegister>(0), 0x0);
+void MipsAssembler::Mfhc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x03, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mthc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x31, rs, ConvertFRegToReg(ft), imm16);
+ EmitI(0x31, rs, static_cast<Register>(ft), imm16);
}
-void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x35, rs, ConvertDRegToReg(ft), imm16);
+void MipsAssembler::Ldc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x35, rs, static_cast<Register>(ft), imm16);
}
void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x39, rs, ConvertFRegToReg(ft), imm16);
+ EmitI(0x39, rs, static_cast<Register>(ft), imm16);
}
-void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x3d, rs, ConvertDRegToReg(ft), imm16);
+void MipsAssembler::Sdc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
}
void MipsAssembler::Break() {
@@ -415,63 +726,881 @@ void MipsAssembler::Nop() {
EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
}
-void MipsAssembler::Move(Register rt, Register rs) {
- EmitI(0x9, rs, rt, 0); // Addiu
+void MipsAssembler::Move(Register rd, Register rs) {
+ Or(rd, rs, ZERO);
}
-void MipsAssembler::Clear(Register rt) {
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rt, 0, 0x20);
+void MipsAssembler::Clear(Register rd) {
+ Move(rd, ZERO);
}
-void MipsAssembler::Not(Register rt, Register rs) {
- EmitR(0, static_cast<Register>(0), rs, rt, 0, 0x27);
+void MipsAssembler::Not(Register rd, Register rs) {
+ Nor(rd, rs, ZERO);
}
-void MipsAssembler::Mul(Register rd, Register rs, Register rt) {
- Mult(rs, rt);
- Mflo(rd);
+void MipsAssembler::Push(Register rs) {
+ IncreaseFrameSize(kMipsWordSize);
+ Sw(rs, SP, 0);
}
-void MipsAssembler::Div(Register rd, Register rs, Register rt) {
- Div(rs, rt);
- Mflo(rd);
+void MipsAssembler::Pop(Register rd) {
+ Lw(rd, SP, 0);
+ DecreaseFrameSize(kMipsWordSize);
}
-void MipsAssembler::Rem(Register rd, Register rs, Register rt) {
- Div(rs, rt);
- Mfhi(rd);
+void MipsAssembler::PopAndReturn(Register rd, Register rt) {
+ Lw(rd, SP, 0);
+ Jr(rt);
+ DecreaseFrameSize(kMipsWordSize);
}
-void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) {
- Addiu(rt, rs, value);
+void MipsAssembler::LoadConst32(Register rd, int32_t value) {
+ if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ Addiu(rd, ZERO, value);
+ } else {
+ Lui(rd, High16Bits(value));
+ if (value & 0xFFFF)
+ Ori(rd, rd, Low16Bits(value));
+ }
}
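
LoadConst32 picks the cheapest materialization: one ori for a 16-bit unsigned value, one addiu for a 16-bit signed value, and lui plus an optional ori otherwise. A sketch of that selection in isolation (ConstCost is a hypothetical helper, not part of the assembler):

#include <cstdint>
#include <cstdio>

static bool IsUint16(int32_t v) { return v >= 0 && v <= 0xFFFF; }
static bool IsInt16(int32_t v) { return v >= -0x8000 && v <= 0x7FFF; }

// Returns the number of instructions LoadConst32 would emit for `value`.
static int ConstCost(int32_t value) {
  if (IsUint16(value)) return 1;         // ori rd, $zero, value
  if (IsInt16(value)) return 1;          // addiu rd, $zero, value
  return (value & 0xFFFF) ? 2 : 1;       // lui [+ ori]
}

int main() {
  printf("%d\n", ConstCost(0x1234));      // 1: ori
  printf("%d\n", ConstCost(-8));          // 1: addiu
  printf("%d\n", ConstCost(0x12340000));  // 1: lui only, low half is zero
  printf("%d\n", ConstCost(0x12345678));  // 2: lui 0x1234; ori 0x5678
  return 0;
}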
-void MipsAssembler::LoadImmediate(Register rt, int32_t value) {
- Addiu(rt, ZERO, value);
+void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value) {
+ LoadConst32(reg_lo, Low32Bits(value));
+ LoadConst32(reg_hi, High32Bits(value));
}
-void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
- size_t size) {
- MipsManagedRegister dst = m_dst.AsMips();
- if (dst.IsNoRegister()) {
- CHECK_EQ(0u, size) << dst;
- } else if (dst.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
- } else if (dst.IsRegisterPair()) {
- CHECK_EQ(8u, size) << dst;
- LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
- LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
- } else if (dst.IsFRegister()) {
- LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+void MipsAssembler::StoreConst32ToOffset(int32_t value,
+ Register base,
+ int32_t offset,
+ Register temp) {
+ if (!IsInt<16>(offset)) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the loaded value.
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+ LoadConst32(temp, value);
+ Sw(temp, base, offset);
+}
+
+void MipsAssembler::StoreConst64ToOffset(int64_t value,
+ Register base,
+ int32_t offset,
+ Register temp) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) || !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize))) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the loaded value.
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+ LoadConst32(temp, Low32Bits(value));
+ Sw(temp, base, offset);
+ LoadConst32(temp, High32Bits(value));
+ Sw(temp, base, offset + kMipsWordSize);
+}
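
Both StoreConst*ToOffset helpers, like the load/store routines later in this file, share one pattern for offsets outside the signed 16-bit immediate range of lw/sw: build the offset in AT, fold the base register into AT, and continue with a zero offset. A simplified sketch of that address split (the real code also widens the check to cover the second word of a doubleword access):

#include <cassert>
#include <cstdint>

struct SplitAddress {
  bool use_scratch;  // True if AT must hold base + offset.
  int32_t offset;    // Residual immediate for the memory instruction.
};

static bool IsInt16(int32_t v) { return v >= -0x8000 && v <= 0x7FFF; }

static SplitAddress Split(int32_t offset) {
  if (IsInt16(offset)) {
    return {false, offset};  // lw/sw reg, offset(base)
  }
  return {true, 0};          // AT = offset; AT += base; lw/sw reg, 0(AT)
}

int main() {
  assert(!Split(0x7FFF).use_scratch);
  assert(Split(0x8000).use_scratch && Split(0x8000).offset == 0);
  return 0;
}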
+
+void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) {
+ LoadConst32(temp, value);
+ Mtc1(temp, r);
+}
+
+void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
+ LoadConst32(temp, Low32Bits(value));
+ Mtc1(temp, rd);
+ LoadConst32(temp, High32Bits(value));
+ Mthc1(temp, rd);
+}
+
+void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) {
+ if (IsInt<16>(value)) {
+ Addiu(rt, rs, value);
+ } else {
+ LoadConst32(temp, value);
+ Addu(rt, rs, temp);
+ }
+}
+
+void MipsAssembler::Branch::InitShortOrLong(MipsAssembler::Branch::OffsetBits offset_size,
+ MipsAssembler::Branch::Type short_type,
+ MipsAssembler::Branch::Type long_type) {
+ type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
+}
+
+void MipsAssembler::Branch::InitializeType(bool is_call, bool is_r6) {
+ OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
+ if (is_r6) {
+ // R6
+ if (is_call) {
+ InitShortOrLong(offset_size, kR6Call, kR6LongCall);
+ } else if (condition_ == kUncond) {
+ InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch);
+ } else {
+ if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
+ // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kR6CondBranch : kR6LongCondBranch;
+ } else {
+ InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch);
+ }
+ }
+ } else {
+ // R2
+ if (is_call) {
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ } else if (condition_ == kUncond) {
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ } else {
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ }
+ }
+ old_type_ = type_;
+}
+
+bool MipsAssembler::Branch::IsNop(BranchCondition condition, Register lhs, Register rhs) {
+ switch (condition) {
+ case kCondLT:
+ case kCondGT:
+ case kCondNE:
+ case kCondLTU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
+
+bool MipsAssembler::Branch::IsUncond(BranchCondition condition, Register lhs, Register rhs) {
+ switch (condition) {
+ case kUncond:
+ return true;
+ case kCondGE:
+ case kCondLE:
+ case kCondEQ:
+ case kCondGEU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
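
IsNop and IsUncond fold the degenerate case of comparing a register with itself: a < a, a > a, a != a and unsigned a < a can never be taken, while a >= a, a <= a, a == a and unsigned a >= a always are. The same case split, written out with illustrative enumerators:

#include <cassert>

enum Cond { kLT, kLE, kGT, kGE, kEQ, kNE };

// Classification of a branch comparing a register with itself.
enum Fold { kFoldsToNop, kFoldsToUncond };

static Fold FoldSameRegs(Cond c) {
  switch (c) {
    case kLT: case kGT: case kNE: return kFoldsToNop;     // never taken
    case kLE: case kGE: case kEQ: return kFoldsToUncond;  // always taken
  }
  return kFoldsToNop;  // unreachable
}

int main() {
  assert(FoldSameRegs(kLT) == kFoldsToNop);     // blt $a0, $a0 -> nothing
  assert(FoldSameRegs(kGE) == kFoldsToUncond);  // bge $a0, $a0 -> b
  return 0;
}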
+
+MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(0),
+ rhs_reg_(0),
+ condition_(kUncond) {
+ InitializeType(false, is_r6);
+}
+
+MipsAssembler::Branch::Branch(bool is_r6,
+ uint32_t location,
+ uint32_t target,
+ MipsAssembler::BranchCondition condition,
+ Register lhs_reg,
+ Register rhs_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(lhs_reg),
+ rhs_reg_(rhs_reg),
+ condition_(condition) {
+ CHECK_NE(condition, kUncond);
+ switch (condition) {
+ case kCondLT:
+ case kCondGE:
+ case kCondLE:
+ case kCondGT:
+ case kCondLTU:
+ case kCondGEU:
+ // We don't support synthetic R2 branches (preceded with slt[u]) at this level
+ // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
+ // We leave this up to the caller.
+ CHECK(is_r6);
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondNE:
+ // Require registers other than 0 not only for R6, but also for R2 to catch errors.
+ // To compare with 0, use dedicated kCond*Z conditions.
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_NE(rhs_reg, ZERO);
+ break;
+ case kCondLTZ:
+ case kCondGEZ:
+ case kCondLEZ:
+ case kCondGTZ:
+ case kCondEQZ:
+ case kCondNEZ:
+ // Require registers other than 0 not only for R6, but also for R2 to catch errors.
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_EQ(rhs_reg, ZERO);
+ break;
+ case kUncond:
+ UNREACHABLE();
+ }
+ CHECK(!IsNop(condition, lhs_reg, rhs_reg));
+ if (IsUncond(condition, lhs_reg, rhs_reg)) {
+ // Branch condition is always true, make the branch unconditional.
+ condition_ = kUncond;
+ }
+ InitializeType(false, is_r6);
+}
+
+MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target, Register indirect_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(indirect_reg),
+ rhs_reg_(0),
+ condition_(kUncond) {
+ CHECK_NE(indirect_reg, ZERO);
+ CHECK_NE(indirect_reg, AT);
+ InitializeType(true, is_r6);
+}
+
+MipsAssembler::BranchCondition MipsAssembler::Branch::OppositeCondition(
+ MipsAssembler::BranchCondition cond) {
+ switch (cond) {
+ case kCondLT:
+ return kCondGE;
+ case kCondGE:
+ return kCondLT;
+ case kCondLE:
+ return kCondGT;
+ case kCondGT:
+ return kCondLE;
+ case kCondLTZ:
+ return kCondGEZ;
+ case kCondGEZ:
+ return kCondLTZ;
+ case kCondLEZ:
+ return kCondGTZ;
+ case kCondGTZ:
+ return kCondLEZ;
+ case kCondEQ:
+ return kCondNE;
+ case kCondNE:
+ return kCondEQ;
+ case kCondEQZ:
+ return kCondNEZ;
+ case kCondNEZ:
+ return kCondEQZ;
+ case kCondLTU:
+ return kCondGEU;
+ case kCondGEU:
+ return kCondLTU;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ }
+ UNREACHABLE();
+}
+
+MipsAssembler::Branch::Type MipsAssembler::Branch::GetType() const {
+ return type_;
+}
+
+MipsAssembler::BranchCondition MipsAssembler::Branch::GetCondition() const {
+ return condition_;
+}
+
+Register MipsAssembler::Branch::GetLeftRegister() const {
+ return static_cast<Register>(lhs_reg_);
+}
+
+Register MipsAssembler::Branch::GetRightRegister() const {
+ return static_cast<Register>(rhs_reg_);
+}
+
+uint32_t MipsAssembler::Branch::GetTarget() const {
+ return target_;
+}
+
+uint32_t MipsAssembler::Branch::GetLocation() const {
+ return location_;
+}
+
+uint32_t MipsAssembler::Branch::GetOldLocation() const {
+ return old_location_;
+}
+
+uint32_t MipsAssembler::Branch::GetLength() const {
+ return branch_info_[type_].length;
+}
+
+uint32_t MipsAssembler::Branch::GetOldLength() const {
+ return branch_info_[old_type_].length;
+}
+
+uint32_t MipsAssembler::Branch::GetSize() const {
+ return GetLength() * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetOldSize() const {
+ return GetOldLength() * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetEndLocation() const {
+ return GetLocation() + GetSize();
+}
+
+uint32_t MipsAssembler::Branch::GetOldEndLocation() const {
+ return GetOldLocation() + GetOldSize();
+}
+
+bool MipsAssembler::Branch::IsLong() const {
+ switch (type_) {
+ // R2 short branches.
+ case kUncondBranch:
+ case kCondBranch:
+ case kCall:
+ // R6 short branches.
+ case kR6UncondBranch:
+ case kR6CondBranch:
+ case kR6Call:
+ return false;
+ // R2 long branches.
+ case kLongUncondBranch:
+ case kLongCondBranch:
+ case kLongCall:
+ // R6 long branches.
+ case kR6LongUncondBranch:
+ case kR6LongCondBranch:
+ case kR6LongCall:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool MipsAssembler::Branch::IsResolved() const {
+ return target_ != kUnresolved;
+}
+
+MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSize() const {
+ OffsetBits offset_size =
+ (type_ == kR6CondBranch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
+ ? kOffset23
+ : branch_info_[type_].offset_size;
+ return offset_size;
+}
+
+MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSizeNeeded(uint32_t location,
+ uint32_t target) {
+ // For unresolved targets assume the shortest encoding
+ // (later it will be made longer if needed).
+ if (target == kUnresolved)
+ return kOffset16;
+ int64_t distance = static_cast<int64_t>(target) - location;
+ // To simplify calculations in composite branches consisting of multiple instructions,
+ // bump up the distance by a value larger than the max byte size of a composite branch.
+ distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
+ if (IsInt<kOffset16>(distance))
+ return kOffset16;
+ else if (IsInt<kOffset18>(distance))
+ return kOffset18;
+ else if (IsInt<kOffset21>(distance))
+ return kOffset21;
+ else if (IsInt<kOffset23>(distance))
+ return kOffset23;
+ else if (IsInt<kOffset28>(distance))
+ return kOffset28;
+ return kOffset32;
+}
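
GetOffsetSizeNeeded classifies a branch by the signed byte distance to its target, padded by kMaxBranchSize so a multi-instruction branch can never be misclassified by its own expansion. A worked sketch of the thresholds, taking the pad as 40 bytes (the 10-instruction kLongCondBranch from branch_info_ below; the exact kMaxBranchSize value is an assumption here):

#include <cstdint>
#include <cstdio>
#include <initializer_list>

static bool FitsSignedBits(int64_t v, int bits) {
  int64_t lim = INT64_C(1) << (bits - 1);
  return v >= -lim && v < lim;
}

static int OffsetBitsNeeded(uint32_t location, uint32_t target) {
  int64_t d = static_cast<int64_t>(target) - location;
  d += (d >= 0) ? 40 : -40;  // pad by the assumed max composite branch size
  for (int bits : {16, 18, 21, 23, 28}) {
    if (FitsSignedBits(d, bits)) return bits;
  }
  return 32;
}

int main() {
  printf("%d\n", OffsetBitsNeeded(0, 0x7000));      // 16: a short R2 branch reaches it
  printf("%d\n", OffsetBitsNeeded(0, 0x1F000));     // 18: fits b/bc after the shift
  printf("%d\n", OffsetBitsNeeded(0, 0x10000000));  // 32: needs a long branch
  return 0;
}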
+
+void MipsAssembler::Branch::Resolve(uint32_t target) {
+ target_ = target;
+}
+
+void MipsAssembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
+ if (location_ > expand_location) {
+ location_ += delta;
+ }
+ if (!IsResolved()) {
+ return; // Don't know the target yet.
+ }
+ if (target_ > expand_location) {
+ target_ += delta;
+ }
+}
+
+void MipsAssembler::Branch::PromoteToLong() {
+ switch (type_) {
+ // R2 short branches.
+ case kUncondBranch:
+ type_ = kLongUncondBranch;
+ break;
+ case kCondBranch:
+ type_ = kLongCondBranch;
+ break;
+ case kCall:
+ type_ = kLongCall;
+ break;
+ // R6 short branches.
+ case kR6UncondBranch:
+ type_ = kR6LongUncondBranch;
+ break;
+ case kR6CondBranch:
+ type_ = kR6LongCondBranch;
+ break;
+ case kR6Call:
+ type_ = kR6LongCall;
+ break;
+ default:
+ // Note: 'type_' is already long.
+ break;
+ }
+ CHECK(IsLong());
+}
+
+uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
+ // If the branch is still unresolved or already long, nothing to do.
+ if (IsLong() || !IsResolved()) {
+ return 0;
+ }
+ // Promote the short branch to long if the offset size is too small
+ // to hold the distance between location_ and target_.
+ if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ // The following logic is for debugging/testing purposes.
+ // Promote some short branches to long when it's not really required.
+ if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max())) {
+ int64_t distance = static_cast<int64_t>(target_) - location_;
+ distance = (distance >= 0) ? distance : -distance;
+ if (distance >= max_short_distance) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ }
+ return 0;
+}
+
+uint32_t MipsAssembler::Branch::GetOffsetLocation() const {
+ return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetOffset() const {
+ CHECK(IsResolved());
+ uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
+ // Calculate the byte distance between instructions and also account for
+ // different PC-relative origins.
+ uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
+ // Prepare the offset for encoding into the instruction(s).
+ offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
+ return offset;
+}
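
GetOffset turns the byte distance into the instruction's immediate: subtract the PC origin, mask to the encodable width, then drop the bits the hardware re-appends (2 for the classic 18-bit branches, 0 for the lui/ori pairs). The arithmetic for a plain R2 b, using pc_org = 1 and offset_shift = 2 from the kUncondBranch row of branch_info_ below:

#include <cassert>
#include <cstdint>

// Encode the 16-bit immediate of an R2 `b` at `loc` targeting `target`.
// The branch offset is relative to the delay-slot PC (loc + 4), in words.
static uint16_t EncodeBOffset(uint32_t loc, uint32_t target) {
  uint32_t ofs_mask = 0xFFFFFFFF >> (32 - 18);             // 18 encodable offset bits
  uint32_t offset = target - loc - 1 * sizeof(uint32_t);   // pc_org = 1 instruction
  offset = (offset & ofs_mask) >> 2;                       // offset_shift = 2
  return static_cast<uint16_t>(offset);
}

int main() {
  // A branch at 0x100 to 0x120: (0x120 - 0x104) / 4 = 7 words forward.
  assert(EncodeBOffset(0x100, 0x120) == 7);
  // A backward branch to 0xF8: (-12 bytes) / 4 = -3, as a 16-bit two's complement.
  assert(EncodeBOffset(0x100, 0xF8) == 0xFFFD);
  return 0;
}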
+
+MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+const MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) const {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+void MipsAssembler::Bind(MipsLabel* label) {
+ CHECK(!label->IsBound());
+ uint32_t bound_pc = buffer_.Size();
+
+ // Walk the list of branches referring to and preceding this label.
+ // Store the previously unknown target addresses in them.
+ while (label->IsLinked()) {
+ uint32_t branch_id = label->Position();
+ Branch* branch = GetBranch(branch_id);
+ branch->Resolve(bound_pc);
+
+ uint32_t branch_location = branch->GetLocation();
+ // Extract the location of the previous branch in the list (walking the list backwards;
+ // the previous branch ID was stored in the space reserved for this branch).
+ uint32_t prev = buffer_.Load<uint32_t>(branch_location);
+
+ // On to the previous branch in the list...
+ label->position_ = prev;
+ }
+
+ // Now make the label object contain its own location (relative to the end of the preceding
+ // branch, if any; it will be used by the branches referring to and following this label).
+ label->prev_branch_id_plus_one_ = branches_.size();
+ if (label->prev_branch_id_plus_one_) {
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ bound_pc -= branch->GetEndLocation();
+ }
+ label->BindTo(bound_pc);
+}
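
Bind resolves forward references by walking a linked list threaded through the code buffer itself: each unbound use writes the ID of the previous referring branch into its placeholder word, and the label remembers only the most recent branch. The same intrusive-list idea over a plain vector (names here are illustrative, not the assembler's):

#include <cstdint>
#include <vector>

// Each pending use stores the index of the previous pending use (or kEnd).
constexpr uint32_t kEnd = 0xFFFFFFFF;

struct Label {
  uint32_t head = kEnd;  // Most recent use; kEnd if no pending uses.
};

// Record a new forward reference at `slot`, chaining it onto the label.
static void Link(std::vector<uint32_t>& buf, Label* label, uint32_t slot) {
  buf[slot] = label->head;  // the placeholder doubles as the "next" pointer
  label->head = slot;
}

// Patch every pending use with the now-known target.
static void Bind(std::vector<uint32_t>& buf, Label* label, uint32_t target) {
  while (label->head != kEnd) {
    uint32_t slot = label->head;
    label->head = buf[slot];  // advance before overwriting
    buf[slot] = target;
  }
}

int main() {
  std::vector<uint32_t> buf(8, 0);
  Label l;
  Link(buf, &l, 1);
  Link(buf, &l, 5);
  Bind(buf, &l, 7);
  return (buf[1] == 7 && buf[5] == 7) ? 0 : 1;
}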
+
+uint32_t MipsAssembler::GetLabelLocation(MipsLabel* label) const {
+ CHECK(label->IsBound());
+ uint32_t target = label->Position();
+ if (label->prev_branch_id_plus_one_) {
+ // Get label location based on the branch preceding it.
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ target += branch->GetEndLocation();
+ }
+ return target;
+}
+
+uint32_t MipsAssembler::GetAdjustedPosition(uint32_t old_position) {
+ // We can reconstruct the adjustment by going through all the branches from the beginning
+ // up to the old_position. Since we expect GetAdjustedPosition() to be called in a loop
+ // with increasing old_position, we can use the data from the last GetAdjustedPosition() to
+ // continue where we left off and the whole loop should be O(m+n) where m is the number
+ // of positions to adjust and n is the number of branches.
+ if (old_position < last_old_position_) {
+ last_position_adjustment_ = 0;
+ last_old_position_ = 0;
+ last_branch_id_ = 0;
+ }
+ while (last_branch_id_ != branches_.size()) {
+ const Branch* branch = GetBranch(last_branch_id_);
+ if (branch->GetLocation() >= old_position + last_position_adjustment_) {
+ break;
+ }
+ last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
+ ++last_branch_id_;
+ }
+ last_old_position_ = old_position;
+ return old_position + last_position_adjustment_;
+}
+
+void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
+ uint32_t length = branches_.back().GetLength();
+ if (!label->IsBound()) {
+ // Branch forward (to a following label); the distance is unknown.
+ // The first branch forward will contain 0, serving as the terminator of
+ // the list of forward-reaching branches.
+ Emit(label->position_);
+ length--;
+ // Now make the label object point to this branch
+ // (this forms a linked list of branches preceding this label).
+ uint32_t branch_id = branches_.size() - 1;
+ label->LinkTo(branch_id);
+ }
+ // Reserve space for the branch.
+ while (length--) {
+ Nop();
+ }
+}
+
+void MipsAssembler::Buncond(MipsLabel* label) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs) {
+ // If lhs = rhs, this can be a NOP.
+ if (Branch::IsNop(condition, lhs, rhs)) {
+ return;
+ }
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target, condition, lhs, rhs);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::Call(MipsLabel* label, Register indirect_reg) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target, indirect_reg);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::PromoteBranches() {
+ // Promote short branches to long as necessary.
+ bool changed;
+ do {
+ changed = false;
+ for (auto& branch : branches_) {
+ CHECK(branch.IsResolved());
+ uint32_t delta = branch.PromoteIfNeeded();
+ // If this branch has been promoted and needs to expand in size,
+ // relocate all branches by the expansion size.
+ if (delta) {
+ changed = true;
+ uint32_t expand_location = branch.GetLocation();
+ for (auto& branch2 : branches_) {
+ branch2.Relocate(expand_location, delta);
+ }
+ }
+ }
+ } while (changed);
+
+ // Account for branch expansion by resizing the code buffer
+ // and moving the code in it to its final location.
+ size_t branch_count = branches_.size();
+ if (branch_count > 0) {
+ // Resize.
+ Branch& last_branch = branches_[branch_count - 1];
+ uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
+ uint32_t old_size = buffer_.Size();
+ buffer_.Resize(old_size + size_delta);
+ // Move the code residing between branch placeholders.
+ uint32_t end = old_size;
+ for (size_t i = branch_count; i > 0; ) {
+ Branch& branch = branches_[--i];
+ uint32_t size = end - branch.GetOldEndLocation();
+ buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
+ end = branch.GetOldLocation();
+ }
+ }
+}
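
PromoteBranches iterates to a fixed point because one promotion can push another branch's target out of range; only then is the buffer grown and the code between placeholders moved, last chunk first so a move never overwrites bytes that still need to be read. A sketch of that back-to-front move (buffer_.Move is assumed to behave like memmove, and the buffer is assumed to be resized beforehand, as above):

#include <cstdint>
#include <cstring>
#include <vector>

struct Span {
  uint32_t old_location, old_end;  // where the branch placeholder used to sit
  uint32_t new_end;                // where its expanded form now ends
};

// Shift the code chunks between branch placeholders to their final positions,
// processing the last chunk first so a memmove never clobbers its own source.
// `buf` must already be resized to hold the expanded code.
static void MoveChunks(std::vector<uint8_t>& buf, const std::vector<Span>& branches,
                       uint32_t old_size) {
  uint32_t end = old_size;
  for (size_t i = branches.size(); i > 0;) {
    const Span& b = branches[--i];
    uint32_t size = end - b.old_end;  // code between this branch and the next one
    std::memmove(&buf[b.new_end], &buf[b.old_end], size);
    end = b.old_location;
  }
}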
+
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] = {
+ // R2 short branches.
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kUncondBranch
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCondBranch
+ { 5, 2, 0, MipsAssembler::Branch::kOffset16, 0 }, // kCall
+ // R2 long branches.
+ { 9, 3, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongUncondBranch
+ { 10, 4, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCondBranch
+ { 6, 1, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCall
+ // R6 short branches.
+ { 1, 0, 1, MipsAssembler::Branch::kOffset28, 2 }, // kR6UncondBranch
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kR6CondBranch
+ // Exception: kOffset23 for beqzc/bnezc.
+ { 2, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Call
+ // R6 long branches.
+ { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongUncondBranch
+ { 3, 1, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCondBranch
+ { 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCall
+};
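
Each branch_info_ row reads as { length, instr_offset, pc_org, offset_size, offset_shift }: size in 4-byte instructions, index of the instruction carrying the offset, the PC origin in instructions relative to that carrier, the encodable offset width, and the hardware's implicit shift. Spelling out the kCall row under that interpretation (inferred from EmitBranch() below, not quoted from the header):

#include <cstdint>

enum OffsetBits { kOffset16 = 16, kOffset18 = 18, kOffset21 = 21,
                  kOffset23 = 23, kOffset28 = 28, kOffset32 = 32 };

struct BranchInfo {
  uint32_t length;         // branch size in 4-byte instructions
  uint32_t instr_offset;   // which instruction holds the (first) offset half
  uint32_t pc_org;         // PC origin, in instructions past the offset holder
  OffsetBits offset_size;  // encodable offset width in bits
  int offset_shift;        // bits the hardware shifts the offset left by
};

// kCall: nal; nop; addiu reg, ra, offset; jalr reg; nop -- 5 instructions.
// The offset lives in the addiu at index 2. pc_org = 0 because the addiu's own
// address equals the RA value produced by nal (nal's PC + 8), so
// offset = target - (location + 2 * 4), matching GetOffset() above.
constexpr BranchInfo kCallInfo = {5, 2, 0, kOffset16, 0};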
+
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
+ CHECK_EQ(overwriting_, true);
+ overwrite_location_ = branch->GetLocation();
+ uint32_t offset = branch->GetOffset();
+ BranchCondition condition = branch->GetCondition();
+ Register lhs = branch->GetLeftRegister();
+ Register rhs = branch->GetRightRegister();
+ switch (branch->GetType()) {
+ // R2 short branches.
+ case Branch::kUncondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ B(offset);
+ Nop(); // TODO: improve by filling the delay slot.
+ break;
+ case Branch::kCondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ EmitBcond(condition, lhs, rhs, offset);
+ Nop(); // TODO: improve by filling the delay slot.
+ break;
+ case Branch::kCall:
+ Nal();
+ Nop(); // TODO: is this NOP really needed here?
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiu(lhs, RA, offset);
+ Jalr(lhs);
+ Nop();
+ break;
+
+ // R2 long branches.
+ case Branch::kLongUncondBranch:
+ // To get the value of the PC register we need to use the NAL instruction.
+ // NAL clobbers the RA register. However, RA must be preserved if the
+ // method is compiled without the entry/exit sequences that would take care
+ // of preserving RA (typically, leaf methods don't preserve RA explicitly).
+ // So, we need to preserve RA in some temporary storage ourselves. The AT
+ // register can't be used for this because we need it to load a constant
+ // which will be added to the value that NAL stores in RA. And we can't
+ // use T9 for this in the context of the JNI compiler, which uses it
+ // as a scratch register (see InterproceduralScratchRegister()).
+ // If we were to add a 32-bit constant to RA using two ADDIU instructions,
+ // we'd also need to use the ROTR instruction, which requires no less than
+ // MIPSR2.
+ // Perhaps, we could use T8 or one of R2's multiplier/divider registers
+ // (LO or HI) or even a floating-point register, but that doesn't seem
+ // like a nice solution. We may want this to work on both R6 and pre-R6.
+ // For now simply use the stack for RA. This should be OK since for the
+ // vast majority of code a short PC-relative branch is sufficient.
+ // TODO: can this be improved?
+ Push(RA);
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(AT, AT, RA);
+ Lw(RA, SP, 0);
+ Jr(AT);
+ DecreaseFrameSize(kMipsWordSize);
+ break;
+ case Branch::kLongCondBranch:
+ // The comment on case 'Branch::kLongUncondBranch' applies here as well.
+ // Note: the opposite condition branch encodes 8 as the distance, which is equal to the
+ // number of instructions skipped:
+ // (PUSH(IncreaseFrameSize(ADDIU) + SW) + NAL + LUI + ORI + ADDU + LW + JR).
+ EmitBcond(Branch::OppositeCondition(condition), lhs, rhs, 8);
+ Push(RA);
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(AT, AT, RA);
+ Lw(RA, SP, 0);
+ Jr(AT);
+ DecreaseFrameSize(kMipsWordSize);
+ break;
+ case Branch::kLongCall:
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(lhs, AT, RA);
+ Jalr(lhs);
+ Nop();
+ break;
+
+ // R6 short branches.
+ case Branch::kR6UncondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Bc(offset);
+ break;
+ case Branch::kR6CondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ EmitBcondc(condition, lhs, rhs, offset);
+ Nop(); // TODO: improve by filling the forbidden slot.
+ break;
+ case Branch::kR6Call:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiupc(lhs, offset);
+ Jialc(lhs, 0);
+ break;
+
+ // R6 long branches.
+ case Branch::kR6LongUncondBranch:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kR6LongCondBranch:
+ EmitBcondc(Branch::OppositeCondition(condition), lhs, rhs, 2);
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kR6LongCall:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in addiu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(lhs, High16Bits(offset));
+ Addiu(lhs, lhs, Low16Bits(offset));
+ Jialc(lhs, 0);
+ break;
+ }
+ CHECK_EQ(overwrite_location_, branch->GetEndLocation());
+ CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
+}
+
+void MipsAssembler::B(MipsLabel* label) {
+ Buncond(label);
+}
+
+void MipsAssembler::Jalr(MipsLabel* label, Register indirect_reg) {
+ Call(label, indirect_reg);
+}
+
+void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label) {
+ Bcond(label, kCondEQ, rs, rt);
+}
+
+void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label) {
+ Bcond(label, kCondNE, rs, rt);
+}
+
+void MipsAssembler::Beqz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondEQZ, rt);
+}
+
+void MipsAssembler::Bnez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondNEZ, rt);
+}
+
+void MipsAssembler::Bltz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondLTZ, rt);
+}
+
+void MipsAssembler::Bgez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondGEZ, rt);
+}
+
+void MipsAssembler::Blez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondLEZ, rt);
+}
+
+void MipsAssembler::Bgtz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondGTZ, rt);
+}
+
+void MipsAssembler::Blt(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondLT, rs, rt);
+ } else if (!Branch::IsNop(kCondLT, rs, rt)) {
+ // Synthesize the instruction (not available on R2).
+ Slt(AT, rs, rt);
+ Bnez(AT, label);
+ }
+}
+
+void MipsAssembler::Bge(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondGE, rs, rt);
+ } else if (Branch::IsUncond(kCondGE, rs, rt)) {
+ B(label);
+ } else {
+ // Synthesize the instruction (not available on R2).
+ Slt(AT, rs, rt);
+ Beqz(AT, label);
+ }
+}
+
+void MipsAssembler::Bltu(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondLTU, rs, rt);
+ } else if (!Branch::IsNop(kCondLTU, rs, rt)) {
+ // Synthesize the instruction (not available on R2).
+ Sltu(AT, rs, rt);
+ Bnez(AT, label);
+ }
+}
+
+void MipsAssembler::Bgeu(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondGEU, rs, rt);
+ } else if (Branch::IsUncond(kCondGEU, rs, rt)) {
+ B(label);
} else {
- CHECK(dst.IsDRegister()) << dst;
- LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ // Synthesize the instruction (not available on R2).
+ Sltu(AT, rs, rt);
+ Beqz(AT, label);
}
}
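
On R2 the two-register relational branches don't exist, so Blt/Bge/Bltu/Bgeu lower to a set-on-less-than into AT followed by a zero test, while R6 keeps the single compact instruction. A sketch that prints the R2 lowering of blt (the trailing nop reflects the delay slot that the branch machinery fills for short R2 branches):

#include <cstdio>

// Emit, as text, the R2 lowering of "blt rs, rt, label".
static void EmitBltR2(const char* rs, const char* rt, const char* label) {
  std::printf("  slt  $at, %s, %s\n", rs, rt);  // AT = (rs < rt) ? 1 : 0
  std::printf("  bnez $at, %s\n", label);       // taken when the compare set AT
  std::printf("  nop\n");                       // delay slot
}

int main() {
  EmitBltR2("$a0", "$a1", "loop");  // slt $at, $a0, $a1 ; bnez $at, loop ; nop
  return 0;
}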
void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -488,8 +1617,16 @@ void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register
case kLoadWord:
Lw(reg, base, offset);
break;
- case kLoadWordPair:
- LOG(FATAL) << "UNREACHABLE";
+ case kLoadDoubleword:
+ if (reg == base) {
+ // This will clobber the base when loading the lower register. Since we have to load the
+ // higher register as well, this will fail. Solution: reverse the order.
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ Lw(reg, base, offset);
+ } else {
+ Lw(reg, base, offset);
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ }
break;
default:
LOG(FATAL) << "UNREACHABLE";
@@ -497,15 +1634,74 @@ void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register
}
void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
Lwc1(reg, base, offset);
}
-void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
- Ldc1(reg, base, offset);
+void MipsAssembler::LoadDFromOffset(FRegister reg, Register base, int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (!IsAligned<kMipsDoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
+ if (offset & 0x7) {
+ if (Is32BitFPU()) {
+ Lwc1(reg, base, offset);
+ Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Lwc1(reg, base, offset);
+ Lw(T8, base, offset + kMipsWordSize);
+ Mthc1(T8, reg);
+ }
+ } else {
+ Ldc1(reg, base, offset);
+ }
+}
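
When a doubleword FP access is only 4-byte aligned, ldc1 is avoided: a 32-bit FPU loads the even/odd register pair with two lwc1, and a 64-bit FPU routes the high word through a GPR via mthc1. A host-side sketch of the same two-word assembly, assuming the little-endian word order the code above relies on:

#include <cassert>
#include <cstdint>
#include <cstring>

// Assemble a double from two 4-byte-aligned word loads, mirroring
// lwc1 (low word) + mthc1 (high word) on a little-endian MIPS.
static double LoadDoubleUnaligned8(const uint32_t* base, int word_index) {
  uint64_t lo = base[word_index];
  uint64_t hi = base[word_index + 1];
  uint64_t bits = (hi << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  double src = 3.5;
  uint32_t words[3] = {0, 0, 0};
  std::memcpy(&words[1], &src, sizeof(src));  // 4-aligned, not necessarily 8-aligned
  assert(LoadDoubleUnaligned8(words, 1) == 3.5);
  return 0;
}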
+
+void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
+ size_t size) {
+ MipsManagedRegister dst = m_dst.AsMips();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(kMipsWordSize, size) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(kMipsDoublewordSize, size) << dst;
+ LoadFromOffset(kLoadDoubleword, dst.AsRegisterPairLow(), src_register, src_offset);
+ } else if (dst.IsFRegister()) {
+ if (size == kMipsWordSize) {
+ LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size) << dst;
+ LoadDFromOffset(dst.AsFRegister(), src_register, src_offset);
+ }
+ }
}
void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -516,8 +1712,11 @@ void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register
case kStoreWord:
Sw(reg, base, offset);
break;
- case kStoreWordPair:
- LOG(FATAL) << "UNREACHABLE";
+ case kStoreDoubleword:
+ CHECK_NE(reg, base);
+ CHECK_NE(static_cast<Register>(reg + 1), base);
+ Sw(reg, base, offset);
+ Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
break;
default:
LOG(FATAL) << "UNREACHABLE";
@@ -525,11 +1724,40 @@ void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register
}
void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
Swc1(reg, base, offset);
}
-void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) {
- Sdc1(reg, base, offset);
+void MipsAssembler::StoreDToOffset(FRegister reg, Register base, int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (!IsAligned<kMipsDoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
+ if (offset & 0x7) {
+ if (Is32BitFPU()) {
+ Swc1(reg, base, offset);
+ Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Mfhc1(T8, reg);
+ Swc1(reg, base, offset);
+ Sw(T8, base, offset + kMipsWordSize);
+ }
+ } else {
+ Sdc1(reg, base, offset);
+ }
}
static dwarf::Reg DWARFReg(Register reg) {
@@ -546,7 +1774,7 @@ void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// Increase frame to required size.
IncreaseFrameSize(frame_size);
- // Push callee saves and return address
+ // Push callee saves and return address.
int stack_offset = frame_size - kFramePointerSize;
StoreToOffset(kStoreWord, RA, SP, stack_offset);
cfi_.RelOffset(DWARFReg(RA), stack_offset);
@@ -569,13 +1797,13 @@ void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
offset += spill.getSize();
} else if (reg.IsCoreRegister()) {
StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
- offset += 4;
+ offset += kMipsWordSize;
} else if (reg.IsFRegister()) {
StoreSToOffset(reg.AsFRegister(), SP, offset);
- offset += 4;
+ offset += kMipsWordSize;
} else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
+ StoreDToOffset(reg.AsOverlappingDRegisterLow(), SP, offset);
+ offset += kMipsDoublewordSize;
}
}
}
@@ -585,7 +1813,7 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi_.RememberState();
- // Pop callee saves and return address
+ // Pop callee saves and return address.
int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
for (size_t i = 0; i < callee_save_regs.size(); ++i) {
Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
@@ -601,6 +1829,7 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
// Then jump to the return address.
Jr(RA);
+ Nop();
// The CFI should be restored for any code that follows the exit block.
cfi_.RestoreState();
@@ -608,14 +1837,14 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
}
void MipsAssembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, SP, -adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Addiu32(SP, SP, -adjust);
cfi_.AdjustCFAOffset(adjust);
}
void MipsAssembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, SP, adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Addiu32(SP, SP, adjust);
cfi_.AdjustCFAOffset(-adjust);
}
@@ -624,18 +1853,20 @@ void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
if (src.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (src.IsCoreRegister()) {
- CHECK_EQ(4u, size);
+ CHECK_EQ(kMipsWordSize, size);
StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
} else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
+ CHECK_EQ(kMipsDoublewordSize, size);
StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
- SP, dest.Int32Value() + 4);
+ SP, dest.Int32Value() + kMipsWordSize);
} else if (src.IsFRegister()) {
- StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
- } else {
- CHECK(src.IsDRegister());
- StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ if (size == kMipsWordSize) {
+ StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size);
+ StoreDToOffset(src.AsFRegister(), SP, dest.Int32Value());
+ }
}
}
@@ -655,29 +1886,30 @@ void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
+ LoadConst32(scratch.AsCoreRegister(), imm);
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister mscratch) {
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
+ // Is this function even referenced anywhere else in the code?
+ LoadConst32(scratch.AsCoreRegister(), imm);
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
}
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -687,14 +1919,15 @@ void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
MipsManagedRegister scratch = mscratch.AsMips();
StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
}
void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
+ ThreadOffset<kMipsWordSize> src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -707,7 +1940,7 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) {
MipsManagedRegister dest = mdest.AsMips();
- CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
+ CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
if (kPoisonHeapReferences && unpoison_reference) {
@@ -715,16 +1948,15 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberO
}
}
-void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
+void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
MipsManagedRegister dest = mdest.AsMips();
- CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest;
+ CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
+ ThreadOffset<kMipsWordSize> offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -738,7 +1970,7 @@ void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
}
-void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) {
+void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
MipsManagedRegister dest = mdest.AsMips();
MipsManagedRegister src = msrc.AsMips();
if (!dest.Equals(src)) {
@@ -747,14 +1979,19 @@ void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*s
Move(dest.AsCoreRegister(), src.AsCoreRegister());
} else if (dest.IsFRegister()) {
CHECK(src.IsFRegister()) << src;
- MovS(dest.AsFRegister(), src.AsFRegister());
+ if (size == kMipsWordSize) {
+ MovS(dest.AsFRegister(), src.AsFRegister());
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size);
+ MovD(dest.AsFRegister(), src.AsFRegister());
+ }
} else if (dest.IsDRegister()) {
CHECK(src.IsDRegister()) << src;
- MovD(dest.AsDRegister(), src.AsDRegister());
+ MovD(dest.AsOverlappingDRegisterLow(), src.AsOverlappingDRegisterLow());
} else {
CHECK(dest.IsRegisterPair()) << dest;
CHECK(src.IsRegisterPair()) << src;
- // Ensure that the first move doesn't clobber the input of the second
+ // Ensure that the first move doesn't clobber the input of the second.
if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
@@ -766,8 +2003,7 @@ void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*s
}
}
-void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
@@ -775,8 +2011,8 @@ void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
}
void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset<kMipsWordSize> thr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -785,9 +2021,9 @@ void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -796,26 +2032,25 @@ void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
S1, thr_offs.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
+void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
+ CHECK(size == kMipsWordSize || size == kMipsDoublewordSize) << size;
+ if (size == kMipsWordSize) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
+ } else if (size == kMipsDoublewordSize) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + kMipsWordSize);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
}
}
void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
ManagedRegister mscratch, size_t size) {
Register scratch = mscratch.AsMips().AsCoreRegister();
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value());
StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}
@@ -823,107 +2058,117 @@ void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_
void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
ManagedRegister mscratch, size_t size) {
Register scratch = mscratch.AsMips().AsCoreRegister();
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ FrameOffset src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
ManagedRegister src, Offset src_offset,
ManagedRegister mscratch, size_t size) {
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
Register scratch = mscratch.AsMips().AsCoreRegister();
LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value());
StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::MemoryBarrier(ManagedRegister) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+ // TODO: sync?
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- Label null_arg;
+ MipsLabel null_arg;
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ // E.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset).
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0);
+ LoadConst32(out_reg.AsCoreRegister(), 0);
}
- EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ Beqz(in_reg.AsCoreRegister(), &null_arg);
+ Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
- Label null_arg;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- handle_scope_offset.Int32Value());
+ MipsLabel null_arg;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ // E.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset).
+ Beqz(scratch.AsCoreRegister(), &null_arg);
+ Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
// Given a handle scope entry, load the associated reference.
void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
+ ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
CHECK(out_reg.IsCoreRegister()) << out_reg;
CHECK(in_reg.IsCoreRegister()) << in_reg;
- Label null_arg;
+ MipsLabel null_arg;
if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0);
+ LoadConst32(out_reg.AsCoreRegister(), 0);
}
- EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+ Beqz(in_reg.AsCoreRegister(), &null_arg);
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
in_reg.AsCoreRegister(), 0);
- Bind(&null_arg, false);
+ Bind(&null_arg);
}
-void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
+void MipsAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
}
-void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
+void MipsAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
}
void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
@@ -934,22 +2179,24 @@ void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister m
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
base.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- // TODO: place reference map on call
+ Nop();
+ // TODO: place reference map on call.
}
void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, base.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
scratch.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- // TODO: place reference map on call
+ Nop();
+ // TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*mscratch*/) {
+void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "no mips implementation";
+  UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
@@ -958,35 +2205,38 @@ void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
}
void MipsAssembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*mscratch*/) {
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
}
void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
MipsManagedRegister scratch = mscratch.AsMips();
- MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
+ exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- S1, Thread::ExceptionOffset<4>().Int32Value());
- EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false);
-}
-
-void MipsExceptionSlowPath::Emit(Assembler* sasm) {
- MipsAssembler* sp_asm = down_cast<MipsAssembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_, false);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception object as argument
- // Don't care about preserving A0 as this call won't return
- __ Move(A0, scratch_.AsCoreRegister());
- // Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
- __ Jr(T9);
- // Call never returns
- __ Break();
-#undef __
+ S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+  // TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), exception_blocks_.back().Entry()),
+  // as the NAL instruction (used in long R2 branches) may become deprecated.
+  // For now, use instructions common to R2 and R6, as this code must execute on both.
+ Bnez(scratch.AsCoreRegister(), exception_blocks_.back().Entry());
+}
+
+void MipsAssembler::EmitExceptionPoll(MipsExceptionSlowPath* exception) {
+ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving A0 as this call won't return.
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+ Move(A0, exception->scratch_.AsCoreRegister());
+ // Set up call to Thread::Current()->pDeliverException.
+ LoadFromOffset(kLoadWord, T9, S1,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+ Jr(T9);
+ Nop();
+
+ // Call never returns.
+ Break();
}
} // namespace mips
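For reference, a minimal sketch of how these pieces fit together, assuming that
FinalizeCode() (declared in the header below) first emits the queued exception
blocks and then resolves branch sizes, as the declarations suggest:

  void MipsAssembler::FinalizeCode() {
    for (auto& exception_block : exception_blocks_) {
      EmitExceptionPoll(&exception_block);  // Binds Entry() and emits the delivery code.
    }
    PromoteBranches();  // Grow any short branches whose targets are out of range.
  }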
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 378a59cb3e..aa187b812b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -17,54 +17,111 @@
#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#include <utility>
#include <vector>
+#include "arch/mips/instruction_set_features_mips.h"
#include "base/macros.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
-#include "utils/assembler.h"
#include "offsets.h"
+#include "utils/assembler.h"
+#include "utils/label.h"
namespace art {
namespace mips {
+static constexpr size_t kMipsWordSize = 4;
+static constexpr size_t kMipsDoublewordSize = 8;
+
enum LoadOperandType {
kLoadSignedByte,
kLoadUnsignedByte,
kLoadSignedHalfword,
kLoadUnsignedHalfword,
kLoadWord,
- kLoadWordPair,
- kLoadSWord,
- kLoadDWord
+ kLoadDoubleword
};
enum StoreOperandType {
kStoreByte,
kStoreHalfword,
kStoreWord,
- kStoreWordPair,
- kStoreSWord,
- kStoreDWord
+ kStoreDoubleword
+};
+
+class MipsLabel : public Label {
+ public:
+ MipsLabel() : prev_branch_id_plus_one_(0) {}
+
+ MipsLabel(MipsLabel&& src)
+ : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}
+
+ private:
+ uint32_t prev_branch_id_plus_one_; // To get distance from preceding branch, if any.
+
+ friend class MipsAssembler;
+ DISALLOW_COPY_AND_ASSIGN(MipsLabel);
+};
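+
+// A sketch of typical MipsLabel use (illustrative; the register and offset are
+// arbitrary):
+//   MipsLabel skip;
+//   Beqz(V0, &skip);  // Emitted short; promoted to a long branch if out of range.
+//   Sw(V0, SP, 16);   // Skipped when V0 is zero.
+//   Bind(&skip);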
+
+// Slowpath entered when Thread::Current()->_exception is non-null.
+class MipsExceptionSlowPath {
+ public:
+ explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ MipsExceptionSlowPath(MipsExceptionSlowPath&& src)
+ : scratch_(std::move(src.scratch_)),
+ stack_adjust_(std::move(src.stack_adjust_)),
+ exception_entry_(std::move(src.exception_entry_)) {}
+
+ private:
+ MipsLabel* Entry() { return &exception_entry_; }
+ const MipsManagedRegister scratch_;
+ const size_t stack_adjust_;
+ MipsLabel exception_entry_;
+
+ friend class MipsAssembler;
+ DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
class MipsAssembler FINAL : public Assembler {
public:
- MipsAssembler() {}
- virtual ~MipsAssembler() {}
+ explicit MipsAssembler(const MipsInstructionSetFeatures* instruction_set_features = nullptr)
+ : overwriting_(false),
+ overwrite_location_(0),
+ last_position_adjustment_(0),
+ last_old_position_(0),
+ last_branch_id_(0),
+ isa_features_(instruction_set_features) {}
+
+ virtual ~MipsAssembler() {
+ for (auto& branch : branches_) {
+ CHECK(branch.IsResolved());
+ }
+ }
// Emit Machine Instructions.
- void Add(Register rd, Register rs, Register rt);
void Addu(Register rd, Register rs, Register rt);
- void Addi(Register rt, Register rs, uint16_t imm16);
void Addiu(Register rt, Register rs, uint16_t imm16);
- void Sub(Register rd, Register rs, Register rt);
void Subu(Register rd, Register rs, Register rt);
- void Mult(Register rs, Register rt);
- void Multu(Register rs, Register rt);
- void Div(Register rs, Register rt);
- void Divu(Register rs, Register rt);
+
+ void MultR2(Register rs, Register rt); // R2
+ void MultuR2(Register rs, Register rt); // R2
+ void DivR2(Register rs, Register rt); // R2
+ void DivuR2(Register rs, Register rt); // R2
+ void MulR2(Register rd, Register rs, Register rt); // R2
+ void DivR2(Register rd, Register rs, Register rt); // R2
+ void ModR2(Register rd, Register rs, Register rt); // R2
+ void DivuR2(Register rd, Register rs, Register rt); // R2
+ void ModuR2(Register rd, Register rs, Register rt); // R2
+ void MulR6(Register rd, Register rs, Register rt); // R6
+ void MuhuR6(Register rd, Register rs, Register rt); // R6
+ void DivR6(Register rd, Register rs, Register rt); // R6
+ void ModR6(Register rd, Register rs, Register rt); // R6
+ void DivuR6(Register rd, Register rs, Register rt); // R6
+ void ModuR6(Register rd, Register rs, Register rt); // R6
void And(Register rd, Register rs, Register rt);
void Andi(Register rt, Register rs, uint16_t imm16);
@@ -74,12 +131,15 @@ class MipsAssembler FINAL : public Assembler {
void Xori(Register rt, Register rs, uint16_t imm16);
void Nor(Register rd, Register rs, Register rt);
- void Sll(Register rd, Register rs, int shamt);
- void Srl(Register rd, Register rs, int shamt);
- void Sra(Register rd, Register rs, int shamt);
- void Sllv(Register rd, Register rs, Register rt);
- void Srlv(Register rd, Register rs, Register rt);
- void Srav(Register rd, Register rs, Register rt);
+ void Seb(Register rd, Register rt); // R2+
+ void Seh(Register rd, Register rt); // R2+
+
+ void Sll(Register rd, Register rt, int shamt);
+ void Srl(Register rd, Register rt, int shamt);
+ void Sra(Register rd, Register rt, int shamt);
+ void Sllv(Register rd, Register rt, Register rs);
+ void Srlv(Register rd, Register rt, Register rs);
+ void Srav(Register rd, Register rt, Register rs);
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
@@ -87,8 +147,9 @@ class MipsAssembler FINAL : public Assembler {
void Lbu(Register rt, Register rs, uint16_t imm16);
void Lhu(Register rt, Register rs, uint16_t imm16);
void Lui(Register rt, uint16_t imm16);
- void Mfhi(Register rd);
- void Mflo(Register rd);
+ void Sync(uint32_t stype);
+ void Mfhi(Register rd); // R2
+ void Mflo(Register rd); // R2
void Sb(Register rt, Register rs, uint16_t imm16);
void Sh(Register rt, Register rs, uint16_t imm16);
@@ -99,81 +160,138 @@ class MipsAssembler FINAL : public Assembler {
void Slti(Register rt, Register rs, uint16_t imm16);
void Sltiu(Register rt, Register rs, uint16_t imm16);
- void Beq(Register rt, Register rs, uint16_t imm16);
- void Bne(Register rt, Register rs, uint16_t imm16);
- void J(uint32_t address);
- void Jal(uint32_t address);
- void Jr(Register rs);
+ void B(uint16_t imm16);
+ void Beq(Register rs, Register rt, uint16_t imm16);
+ void Bne(Register rs, Register rt, uint16_t imm16);
+ void Beqz(Register rt, uint16_t imm16);
+ void Bnez(Register rt, uint16_t imm16);
+ void Bltz(Register rt, uint16_t imm16);
+ void Bgez(Register rt, uint16_t imm16);
+ void Blez(Register rt, uint16_t imm16);
+ void Bgtz(Register rt, uint16_t imm16);
+ void J(uint32_t addr26);
+ void Jal(uint32_t addr26);
+ void Jalr(Register rd, Register rs);
void Jalr(Register rs);
+ void Jr(Register rs);
+ void Nal();
+ void Auipc(Register rs, uint16_t imm16); // R6
+ void Addiupc(Register rs, uint32_t imm19); // R6
+ void Bc(uint32_t imm26); // R6
+ void Jic(Register rt, uint16_t imm16); // R6
+ void Jialc(Register rt, uint16_t imm16); // R6
+ void Bltc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bltzc(Register rt, uint16_t imm16); // R6
+ void Bgtzc(Register rt, uint16_t imm16); // R6
+ void Bgec(Register rs, Register rt, uint16_t imm16); // R6
+ void Bgezc(Register rt, uint16_t imm16); // R6
+ void Blezc(Register rt, uint16_t imm16); // R6
+ void Bltuc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bgeuc(Register rs, Register rt, uint16_t imm16); // R6
+ void Beqc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bnec(Register rs, Register rt, uint16_t imm16); // R6
+ void Beqzc(Register rs, uint32_t imm21); // R6
+ void Bnezc(Register rs, uint32_t imm21); // R6
void AddS(FRegister fd, FRegister fs, FRegister ft);
void SubS(FRegister fd, FRegister fs, FRegister ft);
void MulS(FRegister fd, FRegister fs, FRegister ft);
void DivS(FRegister fd, FRegister fs, FRegister ft);
- void AddD(DRegister fd, DRegister fs, DRegister ft);
- void SubD(DRegister fd, DRegister fs, DRegister ft);
- void MulD(DRegister fd, DRegister fs, DRegister ft);
- void DivD(DRegister fd, DRegister fs, DRegister ft);
+ void AddD(FRegister fd, FRegister fs, FRegister ft);
+ void SubD(FRegister fd, FRegister fs, FRegister ft);
+ void MulD(FRegister fd, FRegister fs, FRegister ft);
+ void DivD(FRegister fd, FRegister fs, FRegister ft);
void MovS(FRegister fd, FRegister fs);
- void MovD(DRegister fd, DRegister fs);
+ void MovD(FRegister fd, FRegister fs);
+ void NegS(FRegister fd, FRegister fs);
+ void NegD(FRegister fd, FRegister fs);
+
+ void Cvtsw(FRegister fd, FRegister fs);
+ void Cvtdw(FRegister fd, FRegister fs);
+ void Cvtsd(FRegister fd, FRegister fs);
+ void Cvtds(FRegister fd, FRegister fs);
void Mfc1(Register rt, FRegister fs);
- void Mtc1(FRegister ft, Register rs);
+ void Mtc1(Register rt, FRegister fs);
+ void Mfhc1(Register rt, FRegister fs);
+ void Mthc1(Register rt, FRegister fs);
void Lwc1(FRegister ft, Register rs, uint16_t imm16);
- void Ldc1(DRegister ft, Register rs, uint16_t imm16);
+ void Ldc1(FRegister ft, Register rs, uint16_t imm16);
void Swc1(FRegister ft, Register rs, uint16_t imm16);
- void Sdc1(DRegister ft, Register rs, uint16_t imm16);
+ void Sdc1(FRegister ft, Register rs, uint16_t imm16);
void Break();
void Nop();
- void Move(Register rt, Register rs);
- void Clear(Register rt);
- void Not(Register rt, Register rs);
- void Mul(Register rd, Register rs, Register rt);
- void Div(Register rd, Register rs, Register rt);
- void Rem(Register rd, Register rs, Register rt);
-
- void AddConstant(Register rt, Register rs, int32_t value);
- void LoadImmediate(Register rt, int32_t value);
+ void Move(Register rd, Register rs);
+ void Clear(Register rd);
+ void Not(Register rd, Register rs);
+
+ // Higher level composite instructions.
+ void LoadConst32(Register rd, int32_t value);
+ void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
+ void LoadDConst64(FRegister rd, int64_t value, Register temp);
+ void LoadSConst32(FRegister r, int32_t value, Register temp);
+ void StoreConst32ToOffset(int32_t value, Register base, int32_t offset, Register temp);
+ void StoreConst64ToOffset(int64_t value, Register base, int32_t offset, Register temp);
+ void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);
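+  // For example (illustrative), LoadConst32(rd, 0x12345678) typically expands to
+  //   lui rd, 0x1234
+  //   ori rd, rd, 0x5678
+  // while a value that already fits a 16-bit immediate takes a single instruction.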
+
+ // These will generate R2 branches or R6 branches as appropriate.
+ void Bind(MipsLabel* label);
+ void B(MipsLabel* label);
+ void Jalr(MipsLabel* label, Register indirect_reg);
+ void Beq(Register rs, Register rt, MipsLabel* label);
+ void Bne(Register rs, Register rt, MipsLabel* label);
+ void Beqz(Register rt, MipsLabel* label);
+ void Bnez(Register rt, MipsLabel* label);
+ void Bltz(Register rt, MipsLabel* label);
+ void Bgez(Register rt, MipsLabel* label);
+ void Blez(Register rt, MipsLabel* label);
+ void Bgtz(Register rt, MipsLabel* label);
+ void Blt(Register rs, Register rt, MipsLabel* label);
+ void Bge(Register rs, Register rt, MipsLabel* label);
+ void Bltu(Register rs, Register rt, MipsLabel* label);
+ void Bgeu(Register rs, Register rt, MipsLabel* label);
void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
- void LoadDFromOffset(DRegister reg, Register base, int32_t offset);
+ void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
void StoreSToOffset(FRegister reg, Register base, int32_t offset);
- void StoreDToOffset(DRegister reg, Register base, int32_t offset);
+ void StoreDToOffset(FRegister reg, Register base, int32_t offset);
// Emit data (e.g. encoded instruction or immediate) to the instruction stream.
- void Emit(int32_t value);
- void EmitBranch(Register rt, Register rs, Label* label, bool equal);
- void EmitJump(Label* label, bool link);
- void Bind(Label* label, bool is_jump);
+ void Emit(uint32_t value);
+
+ // Push/pop composite routines.
+ void Push(Register rs);
+ void Pop(Register rd);
+ void PopAndReturn(Register rd, Register rt);
void Bind(Label* label) OVERRIDE {
- Bind(label, false);
+ Bind(down_cast<MipsLabel*>(label));
}
- void Jump(Label* label) OVERRIDE {
- EmitJump(label, false);
+ void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
//
- // Overridden common assembler high-level functionality
+ // Overridden common assembler high-level functionality.
//
- // Emit code that will create an activation on the stack
+ // Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
- // Emit code that will remove an activation from the stack
+ // Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
- // Store routines
+ // Store routines.
void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
@@ -191,7 +309,7 @@ class MipsAssembler FINAL : public Assembler {
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
- // Load routines
+ // Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
void LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) OVERRIDE;
@@ -205,7 +323,7 @@ class MipsAssembler FINAL : public Assembler {
void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<4> offs) OVERRIDE;
- // Copying routines
+ // Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
@@ -235,13 +353,13 @@ class MipsAssembler FINAL : public Assembler {
void MemoryBarrier(ManagedRegister) OVERRIDE;
- // Sign extension
+ // Sign extension.
void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
- // Zero extension
+ // Zero extension.
void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
- // Exploit fast access in managed code to Thread::Current()
+ // Exploit fast access in managed code to Thread::Current().
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
@@ -257,7 +375,7 @@ class MipsAssembler FINAL : public Assembler {
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister mscratch, bool null_allowed) OVERRIDE;
- // src holds a handle scope entry (Object**) load this into dst
+ // src holds a handle scope entry (Object**) load this into dst.
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
@@ -265,7 +383,7 @@ class MipsAssembler FINAL : public Assembler {
void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
- // Call to address held at [base+offset]
+ // Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void CallFromThread32(ThreadOffset<4> offset, ManagedRegister mscratch) OVERRIDE;
@@ -274,41 +392,251 @@ class MipsAssembler FINAL : public Assembler {
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ // Emit slow paths queued during assembly and promote short branches to long if needed.
+ void FinalizeCode() OVERRIDE;
+
+ // Emit branches and finalize all instructions.
+ void FinalizeInstructions(const MemoryRegion& region);
+
+ // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
+ // must be used instead of MipsLabel::GetPosition()).
+ uint32_t GetLabelLocation(MipsLabel* label) const;
+
+ // Get the final position of a label after local fixup based on the old position
+ // recorded before FinalizeCode().
+ uint32_t GetAdjustedPosition(uint32_t old_position);
+
+ enum BranchCondition {
+ kCondLT,
+ kCondGE,
+ kCondLE,
+ kCondGT,
+ kCondLTZ,
+ kCondGEZ,
+ kCondLEZ,
+ kCondGTZ,
+ kCondEQ,
+ kCondNE,
+ kCondEQZ,
+ kCondNEZ,
+ kCondLTU,
+ kCondGEU,
+ kUncond,
+ };
+ friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
+
private:
+ class Branch {
+ public:
+ enum Type {
+ // R2 short branches.
+ kUncondBranch,
+ kCondBranch,
+ kCall,
+ // R2 long branches.
+ kLongUncondBranch,
+ kLongCondBranch,
+ kLongCall,
+ // R6 short branches.
+ kR6UncondBranch,
+ kR6CondBranch,
+ kR6Call,
+ // R6 long branches.
+ kR6LongUncondBranch,
+ kR6LongCondBranch,
+ kR6LongCall,
+ };
+ // Bit sizes of offsets defined as enums to minimize chance of typos.
+ enum OffsetBits {
+ kOffset16 = 16,
+ kOffset18 = 18,
+ kOffset21 = 21,
+ kOffset23 = 23,
+ kOffset28 = 28,
+ kOffset32 = 32,
+ };
+
+ static constexpr uint32_t kUnresolved = 0xffffffff; // Unresolved target_
+ static constexpr int32_t kMaxBranchLength = 32;
+ static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
+
+ struct BranchInfo {
+ // Branch length as a number of 4-byte-long instructions.
+ uint32_t length;
+ // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
+ // PC-relative offset (or its most significant 16-bit half, which goes first).
+ uint32_t instr_offset;
+ // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
+ // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
+ // instructions) from the instruction containing the offset.
+ uint32_t pc_org;
+ // How large (in bits) a PC-relative offset can be for a given type of branch (kR6CondBranch
+ // is an exception: use kOffset23 for beqzc/bnezc).
+ OffsetBits offset_size;
+ // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
+ // count.
+ int offset_shift;
+ };
+ static const BranchInfo branch_info_[/* Type */];
+
+ // Unconditional branch.
+ Branch(bool is_r6, uint32_t location, uint32_t target);
+ // Conditional branch.
+ Branch(bool is_r6,
+ uint32_t location,
+ uint32_t target,
+ BranchCondition condition,
+ Register lhs_reg,
+ Register rhs_reg = ZERO);
+    // Call (branch and link) that stores the target address in a given register (e.g. T9).
+ Branch(bool is_r6, uint32_t location, uint32_t target, Register indirect_reg);
+
+ // Some conditional branches with lhs = rhs are effectively NOPs, while some
+ // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
+ // So, we need a way to identify such branches in order to emit no instructions for them
+ // or change them to unconditional.
+ static bool IsNop(BranchCondition condition, Register lhs, Register rhs);
+ static bool IsUncond(BranchCondition condition, Register lhs, Register rhs);
+
+ static BranchCondition OppositeCondition(BranchCondition cond);
+
+ Type GetType() const;
+ BranchCondition GetCondition() const;
+ Register GetLeftRegister() const;
+ Register GetRightRegister() const;
+ uint32_t GetTarget() const;
+ uint32_t GetLocation() const;
+ uint32_t GetOldLocation() const;
+ uint32_t GetLength() const;
+ uint32_t GetOldLength() const;
+ uint32_t GetSize() const;
+ uint32_t GetOldSize() const;
+ uint32_t GetEndLocation() const;
+ uint32_t GetOldEndLocation() const;
+ bool IsLong() const;
+ bool IsResolved() const;
+
+ // Returns the bit size of the signed offset that the branch instruction can handle.
+ OffsetBits GetOffsetSize() const;
+
+ // Calculates the distance between two byte locations in the assembler buffer and
+ // returns the number of bits needed to represent the distance as a signed integer.
+ //
+ // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
+ // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
+ //
+ // Composite branches (made of several instructions) with longer reach have 32-bit
+ // offsets encoded as 2 16-bit "halves" in two instructions (high half goes first).
+    // The composite branches cover the range of PC +/- 2GB.
+ //
+ // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
+    // case with the addiu instruction and a 16-bit offset.
+ static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);
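+    // For example, a beqz spanning 0x40000 bytes needs a 20-bit signed offset:
+    // beyond kOffset18 (R2 beqz), so on R2 the branch is promoted to a long
+    // branch, while the R6 beqzc (kOffset23) still reaches the target directly.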
+
+ // Resolve a branch when the target is known.
+ void Resolve(uint32_t target);
+
+ // Relocate a branch by a given delta if needed due to expansion of this or another
+ // branch at a given location by this delta (just changes location_ and target_).
+ void Relocate(uint32_t expand_location, uint32_t delta);
+
+ // If the branch is short, changes its type to long.
+ void PromoteToLong();
+
+ // If necessary, updates the type by promoting a short branch to a long branch
+ // based on the branch location and target. Returns the amount (in bytes) by
+ // which the branch size has increased.
+ // max_short_distance caps the maximum distance between location_ and target_
+ // that is allowed for short branches. This is for debugging/testing purposes.
+ // max_short_distance = 0 forces all short branches to become long.
+ // Use the implicit default argument when not debugging/testing.
+ uint32_t PromoteIfNeeded(uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());
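+    // For instance, a test can call PromoteIfNeeded(0) to exercise the long
+    // encodings without padding the buffer past the real reach limits.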
+
+ // Returns the location of the instruction(s) containing the offset.
+ uint32_t GetOffsetLocation() const;
+
+ // Calculates and returns the offset ready for encoding in the branch instruction(s).
+ uint32_t GetOffset() const;
+
+ private:
+ // Completes branch construction by determining and recording its type.
+ void InitializeType(bool is_call, bool is_r6);
+ // Helper for the above.
+ void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
+
+ uint32_t old_location_; // Offset into assembler buffer in bytes.
+ uint32_t location_; // Offset into assembler buffer in bytes.
+ uint32_t target_; // Offset into assembler buffer in bytes.
+
+ uint32_t lhs_reg_ : 5; // Left-hand side register in conditional branches or
+ // indirect call register.
+ uint32_t rhs_reg_ : 5; // Right-hand side register in conditional branches.
+ BranchCondition condition_ : 5; // Condition for conditional branches.
+
+ Type type_ : 5; // Current type of the branch.
+ Type old_type_ : 5; // Initial type of the branch.
+ };
+ friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
+ friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);
+
void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
void EmitI(int opcode, Register rs, Register rt, uint16_t imm);
- void EmitJ(int opcode, int address);
+ void EmitI21(int opcode, Register rs, uint32_t imm21);
+ void EmitI26(int opcode, uint32_t imm26);
void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
-
- int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
- int DecodeBranchOffset(int32_t inst, bool is_jump);
-
- FRegister ConvertDRegToFReg(DRegister reg) {
- return static_cast<FRegister>(reg * 2);
- }
- Register ConvertDRegToReg(DRegister reg) {
- return static_cast<Register>(reg * 2);
+ void EmitBcond(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
+ void EmitBcondc(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21); // R6
+
+ void Buncond(MipsLabel* label);
+ void Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs = ZERO);
+ void Call(MipsLabel* label, Register indirect_reg);
+ void FinalizeLabeledBranch(MipsLabel* label);
+
+ Branch* GetBranch(uint32_t branch_id);
+ const Branch* GetBranch(uint32_t branch_id) const;
+
+ void PromoteBranches();
+ void EmitBranch(Branch* branch);
+ void EmitBranches();
+
+ // Emits exception block.
+ void EmitExceptionPoll(MipsExceptionSlowPath* exception);
+
+ bool IsR6() const {
+ if (isa_features_ != nullptr) {
+ return isa_features_->IsR6();
+ } else {
+ return false;
+ }
}
- Register ConvertFRegToReg(FRegister reg) {
- return static_cast<Register>(reg);
- }
- FRegister ConvertRegToFReg(Register reg) {
- return static_cast<FRegister>(reg);
+
+ bool Is32BitFPU() const {
+ if (isa_features_ != nullptr) {
+ return isa_features_->Is32BitFloatingPoint();
+ } else {
+ return true;
+ }
}
- DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
-};
+ // List of exception blocks to generate at the end of the code cache.
+ std::vector<MipsExceptionSlowPath> exception_blocks_;
-// Slowpath entered when Thread::Current()->_exception is non-null
-class MipsExceptionSlowPath FINAL : public SlowPath {
- public:
- MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const MipsManagedRegister scratch_;
- const size_t stack_adjust_;
+ std::vector<Branch> branches_;
+
+ // Whether appending instructions at the end of the buffer or overwriting the existing ones.
+ bool overwriting_;
+ // The current overwrite location.
+ uint32_t overwrite_location_;
+
+  // Data for GetAdjustedPosition(), see the description there.
+ uint32_t last_position_adjustment_;
+ uint32_t last_old_position_;
+ uint32_t last_branch_id_;
+
+ const MipsInstructionSetFeatures* isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
};
} // namespace mips
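A hedged usage sketch of the reworked interface (the FromVariant() factory and its
return type are assumed from arch/mips/instruction_set_features_mips.h; error
handling is omitted):

  std::string error_msg;
  std::unique_ptr<const MipsInstructionSetFeatures> features(
      MipsInstructionSetFeatures::FromVariant("mips32r6", &error_msg));
  mips::MipsAssembler assembler(features.get());

  mips::MipsLabel done;
  assembler.Bnez(mips::V0, &done);  // Encoded as an R2 or R6 branch per IsR6().
  assembler.Nop();
  assembler.Bind(&done);
  assembler.FinalizeCode();  // Emits exception blocks and promotes branches.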
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
new file mode 100644
index 0000000000..063d8bd825
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include <map>
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+struct MIPSCpuRegisterCompare {
+ bool operator()(const mips::Register& a, const mips::Register& b) const {
+ return a < b;
+ }
+};
+
+class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
+ mips::Register,
+ mips::FRegister,
+ uint32_t> {
+ public:
+ typedef AssemblerTest<mips::MipsAssembler, mips::Register, mips::FRegister, uint32_t> Base;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "mips";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --no-warn -32 -march=mips32r2";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mmips:isa32r2";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.push_back(new mips::Register(mips::ZERO));
+ registers_.push_back(new mips::Register(mips::AT));
+ registers_.push_back(new mips::Register(mips::V0));
+ registers_.push_back(new mips::Register(mips::V1));
+ registers_.push_back(new mips::Register(mips::A0));
+ registers_.push_back(new mips::Register(mips::A1));
+ registers_.push_back(new mips::Register(mips::A2));
+ registers_.push_back(new mips::Register(mips::A3));
+ registers_.push_back(new mips::Register(mips::T0));
+ registers_.push_back(new mips::Register(mips::T1));
+ registers_.push_back(new mips::Register(mips::T2));
+ registers_.push_back(new mips::Register(mips::T3));
+ registers_.push_back(new mips::Register(mips::T4));
+ registers_.push_back(new mips::Register(mips::T5));
+ registers_.push_back(new mips::Register(mips::T6));
+ registers_.push_back(new mips::Register(mips::T7));
+ registers_.push_back(new mips::Register(mips::S0));
+ registers_.push_back(new mips::Register(mips::S1));
+ registers_.push_back(new mips::Register(mips::S2));
+ registers_.push_back(new mips::Register(mips::S3));
+ registers_.push_back(new mips::Register(mips::S4));
+ registers_.push_back(new mips::Register(mips::S5));
+ registers_.push_back(new mips::Register(mips::S6));
+ registers_.push_back(new mips::Register(mips::S7));
+ registers_.push_back(new mips::Register(mips::T8));
+ registers_.push_back(new mips::Register(mips::T9));
+ registers_.push_back(new mips::Register(mips::K0));
+ registers_.push_back(new mips::Register(mips::K1));
+ registers_.push_back(new mips::Register(mips::GP));
+ registers_.push_back(new mips::Register(mips::SP));
+ registers_.push_back(new mips::Register(mips::FP));
+ registers_.push_back(new mips::Register(mips::RA));
+
+ secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
+ secondary_register_names_.emplace(mips::Register(mips::AT), "at");
+ secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
+ secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
+ secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
+ secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
+ secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
+ secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
+ secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
+ secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
+ secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
+ secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
+ secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
+ secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
+ secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
+ secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
+ secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
+ secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
+ secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
+ secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
+ secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
+ secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
+ secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
+ secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
+ secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
+ secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
+ secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
+ secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
+ secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
+ secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
+ secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
+ secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
+
+ fp_registers_.push_back(new mips::FRegister(mips::F0));
+ fp_registers_.push_back(new mips::FRegister(mips::F1));
+ fp_registers_.push_back(new mips::FRegister(mips::F2));
+ fp_registers_.push_back(new mips::FRegister(mips::F3));
+ fp_registers_.push_back(new mips::FRegister(mips::F4));
+ fp_registers_.push_back(new mips::FRegister(mips::F5));
+ fp_registers_.push_back(new mips::FRegister(mips::F6));
+ fp_registers_.push_back(new mips::FRegister(mips::F7));
+ fp_registers_.push_back(new mips::FRegister(mips::F8));
+ fp_registers_.push_back(new mips::FRegister(mips::F9));
+ fp_registers_.push_back(new mips::FRegister(mips::F10));
+ fp_registers_.push_back(new mips::FRegister(mips::F11));
+ fp_registers_.push_back(new mips::FRegister(mips::F12));
+ fp_registers_.push_back(new mips::FRegister(mips::F13));
+ fp_registers_.push_back(new mips::FRegister(mips::F14));
+ fp_registers_.push_back(new mips::FRegister(mips::F15));
+ fp_registers_.push_back(new mips::FRegister(mips::F16));
+ fp_registers_.push_back(new mips::FRegister(mips::F17));
+ fp_registers_.push_back(new mips::FRegister(mips::F18));
+ fp_registers_.push_back(new mips::FRegister(mips::F19));
+ fp_registers_.push_back(new mips::FRegister(mips::F20));
+ fp_registers_.push_back(new mips::FRegister(mips::F21));
+ fp_registers_.push_back(new mips::FRegister(mips::F22));
+ fp_registers_.push_back(new mips::FRegister(mips::F23));
+ fp_registers_.push_back(new mips::FRegister(mips::F24));
+ fp_registers_.push_back(new mips::FRegister(mips::F25));
+ fp_registers_.push_back(new mips::FRegister(mips::F26));
+ fp_registers_.push_back(new mips::FRegister(mips::F27));
+ fp_registers_.push_back(new mips::FRegister(mips::F28));
+ fp_registers_.push_back(new mips::FRegister(mips::F29));
+ fp_registers_.push_back(new mips::FRegister(mips::F30));
+ fp_registers_.push_back(new mips::FRegister(mips::F31));
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
+ }
+
+ std::string RepeatInsn(size_t count, const std::string& insn) {
+ std::string result;
+ for (; count != 0u; --count) {
+ result += insn;
+ }
+ return result;
+ }
+
+ private:
+ std::vector<mips::Register*> registers_;
+ std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
+
+ std::vector<mips::FRegister*> fp_registers_;
+};
+
+
+TEST_F(AssemblerMIPSTest, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+#define __ GetAssembler()->
+
+TEST_F(AssemblerMIPSTest, Addu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Addu, "addu ${reg1}, ${reg2}, ${reg3}"), "Addu");
+}
+
+TEST_F(AssemblerMIPSTest, Addiu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Addiu, -16, "addiu ${reg1}, ${reg2}, {imm}"), "Addiu");
+}
+
+TEST_F(AssemblerMIPSTest, Subu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Subu, "subu ${reg1}, ${reg2}, ${reg3}"), "Subu");
+}
+
+TEST_F(AssemblerMIPSTest, MultR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::MultR2, "mult ${reg1}, ${reg2}"), "MultR2");
+}
+
+TEST_F(AssemblerMIPSTest, MultuR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::MultuR2, "multu ${reg1}, ${reg2}"), "MultuR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivR2Basic) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg1}, ${reg2}"), "DivR2Basic");
+}
+
+TEST_F(AssemblerMIPSTest, DivuR2Basic) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg1}, ${reg2}"), "DivuR2Basic");
+}
+
+TEST_F(AssemblerMIPSTest, MulR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::MulR2, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
+ "DivR2");
+}
+
+TEST_F(AssemblerMIPSTest, ModR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModR2, "div $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
+ "ModR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivuR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
+ "DivuR2");
+}
+
+TEST_F(AssemblerMIPSTest, ModuR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR2, "divu $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
+ "ModuR2");
+}
+
+TEST_F(AssemblerMIPSTest, And) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::And, "and ${reg1}, ${reg2}, ${reg3}"), "And");
+}
+
+TEST_F(AssemblerMIPSTest, Andi) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Andi, 16, "andi ${reg1}, ${reg2}, {imm}"), "Andi");
+}
+
+TEST_F(AssemblerMIPSTest, Or) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Or, "or ${reg1}, ${reg2}, ${reg3}"), "Or");
+}
+
+TEST_F(AssemblerMIPSTest, Ori) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Ori, 16, "ori ${reg1}, ${reg2}, {imm}"), "Ori");
+}
+
+TEST_F(AssemblerMIPSTest, Xor) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Xor, "xor ${reg1}, ${reg2}, ${reg3}"), "Xor");
+}
+
+TEST_F(AssemblerMIPSTest, Xori) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Xori, 16, "xori ${reg1}, ${reg2}, {imm}"), "Xori");
+}
+
+TEST_F(AssemblerMIPSTest, Nor) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "Nor");
+}
+
+TEST_F(AssemblerMIPSTest, Seb) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Seb, "seb ${reg1}, ${reg2}"), "Seb");
+}
+
+TEST_F(AssemblerMIPSTest, Seh) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Seh, "seh ${reg1}, ${reg2}"), "Seh");
+}
+
+TEST_F(AssemblerMIPSTest, Sll) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "Sll");
+}
+
+TEST_F(AssemblerMIPSTest, Srl) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "Srl");
+}
+
+TEST_F(AssemblerMIPSTest, Sra) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "Sra");
+}
+
+TEST_F(AssemblerMIPSTest, Sllv) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "Sllv");
+}
+
+TEST_F(AssemblerMIPSTest, Srlv) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "Srlv");
+}
+
+TEST_F(AssemblerMIPSTest, Srav) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav");
+}
+
+TEST_F(AssemblerMIPSTest, Lb) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb");
+}
+
+TEST_F(AssemblerMIPSTest, Lh) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "Lh");
+}
+
+TEST_F(AssemblerMIPSTest, Lw) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "Lw");
+}
+
+TEST_F(AssemblerMIPSTest, Lbu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "Lbu");
+}
+
+TEST_F(AssemblerMIPSTest, Lhu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lhu, -16, "lhu ${reg1}, {imm}(${reg2})"), "Lhu");
+}
+
+TEST_F(AssemblerMIPSTest, Lui) {
+ DriverStr(RepeatRIb(&mips::MipsAssembler::Lui, 16, "lui ${reg}, {imm}"), "Lui");
+}
+
+TEST_F(AssemblerMIPSTest, Mfhi) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Mfhi, "mfhi ${reg}"), "Mfhi");
+}
+
+TEST_F(AssemblerMIPSTest, Mflo) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Mflo, "mflo ${reg}"), "Mflo");
+}
+
+TEST_F(AssemblerMIPSTest, Sb) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sb, -16, "sb ${reg1}, {imm}(${reg2})"), "Sb");
+}
+
+TEST_F(AssemblerMIPSTest, Sh) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "Sh");
+}
+
+TEST_F(AssemblerMIPSTest, Sw) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "Sw");
+}
+
+TEST_F(AssemblerMIPSTest, Slt) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "Slt");
+}
+
+TEST_F(AssemblerMIPSTest, Sltu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Sltu, "sltu ${reg1}, ${reg2}, ${reg3}"), "Sltu");
+}
+
+TEST_F(AssemblerMIPSTest, Slti) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Slti, -16, "slti ${reg1}, ${reg2}, {imm}"), "Slti");
+}
+
+TEST_F(AssemblerMIPSTest, Sltiu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sltiu, -16, "sltiu ${reg1}, ${reg2}, {imm}"), "Sltiu");
+}
+
+TEST_F(AssemblerMIPSTest, AddS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::AddS, "add.s ${reg1}, ${reg2}, ${reg3}"), "AddS");
+}
+
+TEST_F(AssemblerMIPSTest, AddD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::AddD, "add.d ${reg1}, ${reg2}, ${reg3}"), "AddD");
+}
+
+TEST_F(AssemblerMIPSTest, SubS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SubS, "sub.s ${reg1}, ${reg2}, ${reg3}"), "SubS");
+}
+
+TEST_F(AssemblerMIPSTest, SubD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SubD, "sub.d ${reg1}, ${reg2}, ${reg3}"), "SubD");
+}
+
+TEST_F(AssemblerMIPSTest, MulS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MulS, "mul.s ${reg1}, ${reg2}, ${reg3}"), "MulS");
+}
+
+TEST_F(AssemblerMIPSTest, MulD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MulD, "mul.d ${reg1}, ${reg2}, ${reg3}"), "MulD");
+}
+
+TEST_F(AssemblerMIPSTest, DivS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::DivS, "div.s ${reg1}, ${reg2}, ${reg3}"), "DivS");
+}
+
+TEST_F(AssemblerMIPSTest, DivD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::DivD, "div.d ${reg1}, ${reg2}, ${reg3}"), "DivD");
+}
+
+TEST_F(AssemblerMIPSTest, MovS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::MovS, "mov.s ${reg1}, ${reg2}"), "MovS");
+}
+
+TEST_F(AssemblerMIPSTest, MovD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::MovD, "mov.d ${reg1}, ${reg2}"), "MovD");
+}
+
+TEST_F(AssemblerMIPSTest, NegS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::NegS, "neg.s ${reg1}, ${reg2}"), "NegS");
+}
+
+TEST_F(AssemblerMIPSTest, NegD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::NegD, "neg.d ${reg1}, ${reg2}"), "NegD");
+}
+
+TEST_F(AssemblerMIPSTest, CvtSW) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
+}
+
+TEST_F(AssemblerMIPSTest, CvtDW) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "CvtDW");
+}
+
+TEST_F(AssemblerMIPSTest, CvtSD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "CvtSD");
+}
+
+TEST_F(AssemblerMIPSTest, CvtDS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "CvtDS");
+}
+
+TEST_F(AssemblerMIPSTest, Mfc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mfc1, "mfc1 ${reg1}, ${reg2}"), "Mfc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mtc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mtc1, "mtc1 ${reg1}, ${reg2}"), "Mtc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mfhc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mfhc1, "mfhc1 ${reg1}, ${reg2}"), "Mfhc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mthc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mthc1, "mthc1 ${reg1}, ${reg2}"), "Mthc1");
+}
+
+TEST_F(AssemblerMIPSTest, Lwc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Lwc1, -16, "lwc1 ${reg1}, {imm}(${reg2})"), "Lwc1");
+}
+
+TEST_F(AssemblerMIPSTest, Ldc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Ldc1, -16, "ldc1 ${reg1}, {imm}(${reg2})"), "Ldc1");
+}
+
+TEST_F(AssemblerMIPSTest, Swc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Swc1, -16, "swc1 ${reg1}, {imm}(${reg2})"), "Swc1");
+}
+
+TEST_F(AssemblerMIPSTest, Sdc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Sdc1, -16, "sdc1 ${reg1}, {imm}(${reg2})"), "Sdc1");
+}
+
+TEST_F(AssemblerMIPSTest, Move) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Move, "or ${reg1}, ${reg2}, $zero"), "Move");
+}
+
+TEST_F(AssemblerMIPSTest, Clear) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Clear, "or ${reg}, $zero, $zero"), "Clear");
+}
+
+TEST_F(AssemblerMIPSTest, Not) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Not, "nor ${reg1}, ${reg2}, $zero"), "Not");
+}
+
+TEST_F(AssemblerMIPSTest, LoadFromOffset) {
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A1, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 256);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 1000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x10000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x12345678);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -256);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+
+ const char* expected =
+ "lb $a0, 0($a0)\n"
+ "lb $a0, 0($a1)\n"
+ "lb $a0, 256($a1)\n"
+ "lb $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lb $a0, -256($a1)\n"
+ "lb $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+
+ "lbu $a0, 0($a0)\n"
+ "lbu $a0, 0($a1)\n"
+ "lbu $a0, 256($a1)\n"
+ "lbu $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lbu $a0, -256($a1)\n"
+ "lbu $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+
+ "lh $a0, 0($a0)\n"
+ "lh $a0, 0($a1)\n"
+ "lh $a0, 256($a1)\n"
+ "lh $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lh $a0, -256($a1)\n"
+ "lh $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+
+ "lhu $a0, 0($a0)\n"
+ "lhu $a0, 0($a1)\n"
+ "lhu $a0, 256($a1)\n"
+ "lhu $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lhu $a0, -256($a1)\n"
+ "lhu $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+
+ "lw $a0, 0($a0)\n"
+ "lw $a0, 0($a1)\n"
+ "lw $a0, 256($a1)\n"
+ "lw $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lw $a0, -256($a1)\n"
+ "lw $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+
+ "lw $a1, 4($a0)\n"
+ "lw $a0, 0($a0)\n"
+ "lw $a0, 0($a1)\n"
+ "lw $a1, 4($a1)\n"
+ "lw $a1, 0($a0)\n"
+ "lw $a2, 4($a0)\n"
+ "lw $a0, 0($a2)\n"
+ "lw $a1, 4($a2)\n"
+ "lw $a0, 256($a2)\n"
+ "lw $a1, 260($a2)\n"
+ "lw $a0, 1000($a2)\n"
+ "lw $a1, 1004($a2)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lw $a0, -256($a2)\n"
+ "lw $a1, -252($a2)\n"
+ "lw $a0, 0xFFFF8000($a2)\n"
+ "lw $a1, 0xFFFF8004($a2)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n";
+ DriverStr(expected, "LoadFromOffset");
+}
+
+TEST_F(AssemblerMIPSTest, LoadSFromOffset) {
+ __ LoadSFromOffset(mips::F0, mips::A0, 0);
+ __ LoadSFromOffset(mips::F0, mips::A0, 4);
+ __ LoadSFromOffset(mips::F0, mips::A0, 256);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x8000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x10000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x12345678);
+ __ LoadSFromOffset(mips::F0, mips::A0, -256);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "lwc1 $f0, 0($a0)\n"
+ "lwc1 $f0, 4($a0)\n"
+ "lwc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lwc1 $f0, -256($a0)\n"
+ "lwc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n";
+ DriverStr(expected, "LoadSFromOffset");
+}
+
+TEST_F(AssemblerMIPSTest, LoadDFromOffset) {
+ __ LoadDFromOffset(mips::F0, mips::A0, 0);
+ __ LoadDFromOffset(mips::F0, mips::A0, 4);
+ __ LoadDFromOffset(mips::F0, mips::A0, 256);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x10000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x12345678);
+ __ LoadDFromOffset(mips::F0, mips::A0, -256);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "ldc1 $f0, 0($a0)\n"
+ "lwc1 $f0, 4($a0)\n"
+ "lwc1 $f1, 8($a0)\n"
+ "ldc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "ldc1 $f0, -256($a0)\n"
+ "ldc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n";
+ DriverStr(expected, "LoadDFromOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreToOffset) {
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 256);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 1000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x10000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x12345678);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -256);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+
+ const char* expected =
+ "sb $a0, 0($a0)\n"
+ "sb $a0, 0($a1)\n"
+ "sb $a0, 256($a1)\n"
+ "sb $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "sb $a0, -256($a1)\n"
+ "sb $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+
+ "sh $a0, 0($a0)\n"
+ "sh $a0, 0($a1)\n"
+ "sh $a0, 256($a1)\n"
+ "sh $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "sh $a0, -256($a1)\n"
+ "sh $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+
+ "sw $a0, 0($a0)\n"
+ "sw $a0, 0($a1)\n"
+ "sw $a0, 256($a1)\n"
+ "sw $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "sw $a0, -256($a1)\n"
+ "sw $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+
+ "sw $a0, 0($a2)\n"
+ "sw $a1, 4($a2)\n"
+ "sw $a0, 256($a2)\n"
+ "sw $a1, 260($a2)\n"
+ "sw $a0, 1000($a2)\n"
+ "sw $a1, 1004($a2)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "sw $a0, -256($a2)\n"
+ "sw $a1, -252($a2)\n"
+ "sw $a0, 0xFFFF8000($a2)\n"
+ "sw $a1, 0xFFFF8004($a2)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n";
+ DriverStr(expected, "StoreToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreSToOffset) {
+ __ StoreSToOffset(mips::F0, mips::A0, 0);
+ __ StoreSToOffset(mips::F0, mips::A0, 4);
+ __ StoreSToOffset(mips::F0, mips::A0, 256);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x8000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x10000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x12345678);
+ __ StoreSToOffset(mips::F0, mips::A0, -256);
+ __ StoreSToOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "swc1 $f0, 0($a0)\n"
+ "swc1 $f0, 4($a0)\n"
+ "swc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "swc1 $f0, -256($a0)\n"
+ "swc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n";
+ DriverStr(expected, "StoreSToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreDToOffset) {
+ __ StoreDToOffset(mips::F0, mips::A0, 0);
+ __ StoreDToOffset(mips::F0, mips::A0, 4);
+ __ StoreDToOffset(mips::F0, mips::A0, 256);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x10000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x12345678);
+ __ StoreDToOffset(mips::F0, mips::A0, -256);
+ __ StoreDToOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "sdc1 $f0, 0($a0)\n"
+ "swc1 $f0, 4($a0)\n"
+ "swc1 $f1, 8($a0)\n"
+ "sdc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "sdc1 $f0, -256($a0)\n"
+ "sdc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n";
+ DriverStr(expected, "StoreDToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, B) {
+ mips::MipsLabel label1, label2;
+ __ B(&label1);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label1);
+ __ B(&label2);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label2);
+ __ B(&label1);
+
+ std::string expected =
+ ".set noreorder\n"
+ "b 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ "b 2f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ "b 1b\n"
+ "nop\n";
+ DriverStr(expected, "B");
+}
+
+TEST_F(AssemblerMIPSTest, Beq) {
+ mips::MipsLabel label;
+ __ Beq(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Beq(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "beq $a0, $a1, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "beq $a2, $a3, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Beq");
+}
+
+TEST_F(AssemblerMIPSTest, Bne) {
+ mips::MipsLabel label;
+ __ Bne(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bne(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bne $a0, $a1, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bne $a2, $a3, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bne");
+}
+
+TEST_F(AssemblerMIPSTest, Beqz) {
+ mips::MipsLabel label;
+ __ Beqz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Beqz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "beq $zero, $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "beq $zero, $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Beqz");
+}
+
+TEST_F(AssemblerMIPSTest, Bnez) {
+ mips::MipsLabel label;
+ __ Bnez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bnez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bne $zero, $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bne $zero, $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bnez");
+}
+
+TEST_F(AssemblerMIPSTest, Bltz) {
+ mips::MipsLabel label;
+ __ Bltz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bltz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltz $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bltz $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bltz");
+}
+
+TEST_F(AssemblerMIPSTest, Bgez) {
+ mips::MipsLabel label;
+ __ Bgez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bgez $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bgez $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgez");
+}
+
+TEST_F(AssemblerMIPSTest, Blez) {
+ mips::MipsLabel label;
+ __ Blez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Blez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "blez $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "blez $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Blez");
+}
+
+TEST_F(AssemblerMIPSTest, Bgtz) {
+ mips::MipsLabel label;
+ __ Bgtz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgtz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bgtz $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bgtz $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgtz");
+}
+
+TEST_F(AssemblerMIPSTest, Blt) {
+ mips::MipsLabel label;
+ __ Blt(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Blt(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "slt $at, $a0, $a1\n"
+ "bne $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "slt $at, $a2, $a3\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Blt");
+}
+
+TEST_F(AssemblerMIPSTest, Bge) {
+ mips::MipsLabel label;
+ __ Bge(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bge(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "slt $at, $a0, $a1\n"
+ "beq $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "slt $at, $a2, $a3\n"
+ "beq $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bge");
+}
+
+TEST_F(AssemblerMIPSTest, Bltu) {
+ mips::MipsLabel label;
+ __ Bltu(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bltu(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "sltu $at, $a0, $a1\n"
+ "bne $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "sltu $at, $a2, $a3\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bltu");
+}
+
+TEST_F(AssemblerMIPSTest, Bgeu) {
+ mips::MipsLabel label;
+ __ Bgeu(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgeu(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "sltu $at, $a0, $a1\n"
+ "beq $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "sltu $at, $a2, $a3\n"
+ "beq $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgeu");
+}
+
+#undef __
+
+} // namespace art
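The expected strings above pin down the assembler's offset-materialization strategy: an offset that fits a signed 16-bit immediate is encoded directly into the load or store, while a larger one is first built in $at and added to the base register. A minimal C++ sketch of that dispatch, with IsInt16, High16Bits and Low16Bits standing in for the assembler's real helpers (the actual logic lives in MipsAssembler::LoadFromOffset in assembler_mips.cc):

    // Sketch only: mirrors the expansions encoded in the expected strings.
    void EmitLoadWord(mips::MipsAssembler* a, mips::Register rt,
                      mips::Register base, int32_t off) {
      if (IsInt16(off)) {
        a->Lw(rt, base, off);                  // lw $rt, off($base)
      } else if ((off & 0xFFFF0000) == 0) {    // e.g. 0x8000: low half only.
        a->Ori(mips::AT, mips::ZERO, off);     // ori $at, $zero, off
        a->Addu(mips::AT, mips::AT, base);     // addu $at, $at, $base
        a->Lw(rt, mips::AT, 0);
      } else {                                 // e.g. 0x10000 or 0x12345678.
        a->Lui(mips::AT, High16Bits(off));     // lui $at, upper half
        if (Low16Bits(off) != 0) {
          a->Ori(mips::AT, mips::AT, Low16Bits(off));
        }
        a->Addu(mips::AT, mips::AT, base);
        a->Lw(rt, mips::AT, 0);
      }
    }

The branch tests document a similar lowering for pseudo-branches: Blt, Bge, Bltu and Bgeu have no single MIPS32 encoding, so they expand to a compare into $at followed by a real branch (slt $at, $a0, $a1 then bne $zero, $at, label), and every branch carries a delay-slot nop under .set noreorder.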
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 04e815aa1d..5347bf0302 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2369,44 +2369,48 @@ void X86Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = Low32Bits(v);
int32_t v_high = High32Bits(v);
if (buffer_.size() > 1) {
// Ensure we don't pass the end of the buffer.
for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
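The split between AddInt32 and AppendInt32 is the point of this hunk: AddInt32 deduplicates against existing entries, while AppendInt32 always claims a fresh slot, which matters when equal values must still occupy distinct, contiguous entries. A hedged usage sketch (assembler construction details elided):

    X86Assembler assembler;
    size_t a = assembler.AddInt32(42);     // First occurrence: appended at offset 0.
    size_t b = assembler.AddInt32(42);     // Deduplicated: also returns offset 0.
    size_t c = assembler.AppendInt32(42);  // Forced append: a distinct slot at offset 4.
    // a == 0, b == 0, c == 4; ConstantAreaSize() now reports 8.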
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 93ecdf52fe..b50fda907a 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -166,6 +166,39 @@ class Address : public Operand {
Init(base_in, disp.Int32Value());
}
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale_in, index_in, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ Init(base_in, index_in, scale_in, disp);
+ }
+
+ Address(Register base_in,
+ Register index_in,
+ ScaleFactor scale_in,
+ int32_t disp,
+ AssemblerFixup* fixup) {
+ Init(base_in, index_in, scale_in, disp);
+ SetFixup(fixup);
+ }
+
+ static Address Absolute(uintptr_t addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset<4> addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
void Init(Register base_in, int32_t disp) {
if (disp == 0 && base_in != EBP) {
SetModRM(0, base_in);
@@ -181,14 +214,7 @@ class Address : public Operand {
}
}
- Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
- CHECK_NE(index_in, ESP); // Illegal addressing mode.
- SetModRM(0, ESP);
- SetSIB(scale_in, index_in, EBP);
- SetDisp32(disp);
- }
-
- Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ void Init(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
CHECK_NE(index_in, ESP); // Illegal addressing mode.
if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
@@ -203,20 +229,6 @@ class Address : public Operand {
SetDisp32(disp);
}
}
-
- static Address Absolute(uintptr_t addr) {
- Address result;
- result.SetModRM(0, EBP);
- result.SetDisp32(addr);
- return result;
- }
-
- static Address Absolute(ThreadOffset<4> addr) {
- return Absolute(addr.Int32Value());
- }
-
- private:
- Address() {}
};
@@ -252,40 +264,39 @@ class ConstantArea {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v);
+ size_t AddDouble(double v);
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v);
+ size_t AddFloat(float v);
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v);
+ size_t AddInt32(int32_t v);
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v);
+ size_t AddInt64(int64_t v);
bool IsEmpty() const {
return buffer_.size() == 0;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
-
- void AddFixup(AssemblerFixup* fixup) {
- fixups_.push_back(fixup);
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
}
- const std::vector<AssemblerFixup*>& GetFixups() const {
- return fixups_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
}
private:
- static constexpr size_t kEntrySize = sizeof(int32_t);
+ static constexpr size_t elem_size_ = sizeof(int32_t);
std::vector<int32_t> buffer_;
- std::vector<AssemblerFixup*> fixups_;
};
class X86Assembler FINAL : public Assembler {
@@ -740,26 +751,36 @@ class X86Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
- void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
private:
inline void EmitUint8(uint8_t value);
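With this reshuffle all public Address constructors sit ahead of the private Init helpers and the private default constructor used by Absolute. Together they cover the common x86 addressing forms; a short usage sketch (register and ScaleFactor names as defined by the x86 assembler headers):

    Address disp_only(EAX, 12);                  // [eax + 12]
    Address base_index(EBX, ECX, TIMES_4, 16);   // [ebx + ecx*4 + 16]
    Address index_only(ECX, TIMES_8, 0x100);     // [ecx*8 + 0x100]; EBP as the SIB
                                                 // base with mod=00 means "no base".
    Address abs = Address::Absolute(0xCAFE0000); // [0xCAFE0000] via mod=00, rm=EBP.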
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 6e7d74d528..9eb5e67041 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -3122,7 +3122,14 @@ void X86_64Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
+ // Look for an existing match.
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
return i * elem_size_;
@@ -3130,12 +3137,10 @@ int ConstantArea::AddInt32(int32_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = v;
int32_t v_high = v >> 32;
if (buffer_.size() > 1) {
@@ -3148,18 +3153,18 @@ int ConstantArea::AddInt64(int64_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
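AddInt64 stores the value as two consecutive 32-bit entries. The x86 version above uses the Low32Bits/High32Bits helpers while this x86-64 version relies on plain truncation and an arithmetic shift; both produce the same split:

    int64_t v = INT64_C(0x123456789ABCDEF0);
    int32_t v_low = v;         // bit pattern 0x9ABCDEF0 (low 32 bits kept)
    int32_t v_high = v >> 32;  // 0x12345678
    // {v_low, v_high} are pushed back-to-back, so the returned offset names
    // 8 contiguous little-endian bytes in the constant area.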
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 255f551675..01d28e305d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -269,36 +269,40 @@ class Address : public Operand {
* Class to handle constant area values.
*/
class ConstantArea {
- public:
- ConstantArea() {}
+ public:
+ ConstantArea() {}
- // Add a double to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddDouble(double v);
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddDouble(double v);
- // Add a float to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddFloat(float v);
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddFloat(float v);
- // Add an int32_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt32(int32_t v);
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt32(int32_t v);
- // Add an int64_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt64(int64_t v);
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
- int GetSize() const {
- return buffer_.size() * elem_size_;
- }
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt64(int64_t v);
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
+ }
- private:
- static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
+ }
+
+ private:
+ static constexpr size_t elem_size_ = sizeof(int32_t);
+ std::vector<int32_t> buffer_;
};
@@ -806,19 +810,27 @@ class X86_64Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
@@ -826,6 +838,9 @@ class X86_64Assembler FINAL : public Assembler {
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; }
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
+
//
// Heap poisoning.
//
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 680e2d7b45..17c528209b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1401,7 +1401,7 @@ class Dex2Oat FINAL {
}
ScopedObjectAccess soa(self);
dex_caches_.push_back(soa.AddLocalReference<jobject>(
- class_linker->RegisterDexFile(*dex_file)));
+ class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index c55d285f9f..faa2d2ded9 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -280,6 +280,7 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
{ kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
{ kITypeMask, 49u << kOpcodeShift, "lwc1", "tO", },
+ { kJTypeMask, 50u << kOpcodeShift, "bc", "P" },
{ kITypeMask, 53u << kOpcodeShift, "ldc1", "tO", },
{ kITypeMask | (0x1f << 21), 54u << kOpcodeShift, "jic", "Ti" },
{ kITypeMask | (1 << 21), (54u << kOpcodeShift) | (1 << 21), "beqzc", "Sb" }, // TODO: de-dup?
@@ -290,6 +291,7 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 55u << kOpcodeShift, "ld", "TO", },
{ kITypeMask, 57u << kOpcodeShift, "swc1", "tO", },
{ kITypeMask | (0x1f << 16), (59u << kOpcodeShift) | (30 << 16), "auipc", "Si" },
+ { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (0 << 19), "addiupc", "Sp" },
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
{ kITypeMask | (0x1f << 21), 62u << kOpcodeShift, "jialc", "Ti" },
{ kITypeMask | (1 << 21), (62u << kOpcodeShift) | (1 << 21), "bnezc", "Sb" }, // TODO: de-dup?
@@ -432,6 +434,22 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
}
break;
+ case 'P': // 26-bit offset in bc.
+ {
+ int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
+ offset <<= 2;
+ offset += 4;
+ args << FormatInstructionPointer(instr_ptr + offset);
+ args << StringPrintf(" ; %+d", offset);
+ }
+ break;
+ case 'p': // 19-bit offset in addiupc.
+ {
+ int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
+ args << offset << " ; move r" << rs << ", ";
+ args << FormatInstructionPointer(instr_ptr + (offset << 2));
+ }
+ break;
case 'S': args << 'r' << rs; break;
case 's': args << 'f' << rs; break;
case 'T': args << 'r' << rt; break;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e2486041af..dbf536575a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,7 @@
#include <vector>
#include "arch/instruction_set_features.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
@@ -49,10 +50,12 @@
#include "mirror/object_array-inl.h"
#include "oat.h"
#include "oat_file-inl.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
+#include "stack_map.h"
#include "ScopedLocalRef.h"
#include "thread_list.h"
#include "verifier/dex_gc_map.h"
@@ -1417,8 +1420,10 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
Handle<mirror::DexCache> dex_cache(
- hs->NewHandle(Runtime::Current()->GetClassLinker()->RegisterDexFile(*dex_file)));
+ hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file,
+ runtime->GetLinearAlloc())));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1563,13 +1568,15 @@ class ImageDumper {
}
os << "\n";
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
std::string image_filename = image_space_.GetImageFilename();
std::string oat_location = ImageHeader::GetOatLocationFromImageLocation(image_filename);
os << "OAT LOCATION: " << oat_location;
os << "\n";
std::string error_msg;
- const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
+ const OatFile* oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(
+ oat_location);
if (oat_file == nullptr) {
oat_file = OatFile::Open(oat_location, oat_location,
nullptr, nullptr, false, nullptr,
@@ -1594,7 +1601,7 @@ class ImageDumper {
os << "OBJECTS:\n" << std::flush;
// Loop through all the image spaces and dump their objects.
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ gc::Heap* heap = runtime->GetHeap();
const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
Thread* self = Thread::Current();
{
@@ -1958,24 +1965,27 @@ class ImageDumper {
DCHECK(method != nullptr);
const auto image_pointer_size =
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
+ const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
+ const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
+ ArtCode art_code(method);
if (method->IsNative()) {
- DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
bool first_occurrence;
- const void* quick_oat_code = state->GetQuickOatCodeBegin(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
- state->ComputeOatSize(quick_oat_code, &first_occurrence);
+ state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
}
- if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
- indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code);
+ if (quick_oat_code_begin !=
+ method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
+ indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
}
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
- DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1983,29 +1993,27 @@ class ImageDumper {
bool first_occurrence;
size_t gc_map_bytes = state->ComputeOatSize(
- method->GetNativeGcMap(image_pointer_size), &first_occurrence);
+ art_code.GetNativeGcMap(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes = state->ComputeOatSize(
- method->GetMappingTable(image_pointer_size), &first_occurrence);
+ art_code.GetMappingTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes = 0u;
- if (!method->IsOptimized(image_pointer_size)) {
+ if (!art_code.IsOptimized(image_pointer_size)) {
// Methods compiled with the optimizing compiler have no vmap table.
vmap_table_bytes = state->ComputeOatSize(
- method->GetVmapTable(image_pointer_size), &first_occurrence);
+ art_code.GetVmapTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
}
- const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
- const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
@@ -2394,13 +2402,13 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
// Need to register dex files to get a working dex cache.
ScopedObjectAccess soa(self);
ClassLinker* class_linker = runtime->GetClassLinker();
- class_linker->RegisterOatFile(oat_file);
+ runtime->GetOatFileManager().RegisterOatFile(std::unique_ptr<const OatFile>(oat_file));
std::vector<const DexFile*> class_path;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
- class_linker->RegisterDexFile(*dex_file);
+ class_linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc());
class_path.push_back(dex_file);
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index b8d3e13744..8fe3fa2df1 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,6 +19,7 @@ LOCAL_PATH := $(call my-dir)
include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
+ art_code.cc \
art_field.cc \
art_method.cc \
atomic.cc.arm \
@@ -154,6 +155,7 @@ LIBART_COMMON_SRC_FILES := \
oat.cc \
oat_file.cc \
oat_file_assistant.cc \
+ oat_file_manager.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 4a45f4970b..e676a09733 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -39,7 +39,7 @@ class ArchTest : public CommonRuntimeTest {
runtime->SetInstructionSet(isa);
ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
runtime->SetCalleeSaveMethod(save_method, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 8f6b1ff0a5..d5c7846951 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -38,8 +38,8 @@ void ArmContext::Reset() {
}
void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode art_code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = art_code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 4477631c67..cdc03fe16f 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -40,8 +40,8 @@ void Arm64Context::Reset() {
}
void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 9cfd2eb2d6..ff9c0b320d 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -107,6 +107,22 @@ static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
}
}
+static inline bool IsValidInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ case kMips:
+ case kMips64:
+ return true;
+ case kNone:
+ default:
+ return false;
+ }
+}
+
size_t GetInstructionSetAlignment(InstructionSet isa);
static inline bool Is64BitInstructionSet(InstructionSet isa) {
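IsValidInstructionSet gives callers a cheap guard before acting on an ISA value, for instance one parsed from user input. A sketch (GetInstructionSetFromString is the existing parsing helper in this header):

    InstructionSet isa = GetInstructionSetFromString(isa_str.c_str());
    if (!IsValidInstructionSet(isa)) {
      LOG(ERROR) << "Unsupported instruction set: " << isa_str;
      return false;
    }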
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 08ab356855..dba62d9200 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -38,8 +38,8 @@ void MipsContext::Reset() {
}
void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index 2c17f1c118..d808c9e0dc 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -38,8 +38,8 @@ void Mips64Context::Reset() {
}
void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
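Each per-architecture FillCalleeSaves now takes its frame description from the code attached to the visited frame rather than from the method object. The data consumed is the same QuickMethodFrameInfo triple exercised by arch_test above; a condensed sketch of what the elided loop bodies walk, under that assumption:

    ArtCode code = fr.GetCurrentCode();
    const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
    uint32_t core_spills = frame_info.CoreSpillMask();  // Which core regs were saved.
    uint32_t fp_spills = frame_info.FpSpillMask();      // Which FP regs were saved.
    size_t frame_size = frame_info.FrameSizeInBytes();  // Locates the spill slots.
    // Registers are restored from the highest spill slot down, core regs first,
    // per the comments in the per-arch implementations above.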
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0d2457e3dd..1d10e5db39 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -71,351 +71,7 @@ class StubTest : public CommonRuntimeTest {
// TODO: Set up a frame according to referrer's specs.
size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
ArtMethod* referrer) {
- // Push a transition back into managed code onto the linked list in thread.
- ManagedStack fragment;
- self->PushManagedStackFragment(&fragment);
-
- size_t result;
- size_t fpr_result = 0;
-#if defined(__i386__)
- // TODO: Set the thread?
- __asm__ __volatile__(
- "subl $12, %%esp\n\t" // Align stack.
- "pushl %[referrer]\n\t" // Store referrer.
- "call *%%edi\n\t" // Call the stub
- "addl $16, %%esp" // Pop referrer
- : "=a" (result)
- // Use the result from eax
- : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
- // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
- : "memory"); // clobber.
- // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
- // but compilation fails when declaring that.
-#elif defined(__arm__)
- __asm__ __volatile__(
- "push {r1-r12, lr}\n\t" // Save state, 13*4B = 52B
- ".cfi_adjust_cfa_offset 52\n\t"
- "push {r9}\n\t"
- ".cfi_adjust_cfa_offset 4\n\t"
- "mov r9, %[referrer]\n\n"
- "str r9, [sp, #-8]!\n\t" // Push referrer, +8B padding so 16B aligned
- ".cfi_adjust_cfa_offset 8\n\t"
- "ldr r9, [sp, #8]\n\t"
-
- // Push everything on the stack, so we don't rely on the order. What a mess. :-(
- "sub sp, sp, #20\n\t"
- "str %[arg0], [sp]\n\t"
- "str %[arg1], [sp, #4]\n\t"
- "str %[arg2], [sp, #8]\n\t"
- "str %[code], [sp, #12]\n\t"
- "str %[self], [sp, #16]\n\t"
- "ldr r0, [sp]\n\t"
- "ldr r1, [sp, #4]\n\t"
- "ldr r2, [sp, #8]\n\t"
- "ldr r3, [sp, #12]\n\t"
- "ldr r9, [sp, #16]\n\t"
- "add sp, sp, #20\n\t"
-
- "blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop null and padding
- ".cfi_adjust_cfa_offset -12\n\t"
- "pop {r1-r12, lr}\n\t" // Restore state
- ".cfi_adjust_cfa_offset -52\n\t"
- "mov %[result], r0\n\t" // Save the result
- : [result] "=r" (result)
- // Use the result from r0
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "r0", "memory"); // clobber.
-#elif defined(__aarch64__)
- __asm__ __volatile__(
- // Spill x0-x7 which we say we don't clobber. May contain args.
- "sub sp, sp, #64\n\t"
- ".cfi_adjust_cfa_offset 64\n\t"
- "stp x0, x1, [sp]\n\t"
- "stp x2, x3, [sp, #16]\n\t"
- "stp x4, x5, [sp, #32]\n\t"
- "stp x6, x7, [sp, #48]\n\t"
-
- "sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
- ".cfi_adjust_cfa_offset 16\n\t"
- "str %[referrer], [sp]\n\t" // referrer
-
- // Push everything on the stack, so we don't rely on the order. What a mess. :-(
- "sub sp, sp, #48\n\t"
- ".cfi_adjust_cfa_offset 48\n\t"
- // All things are "r" constraints, so direct str/stp should work.
- "stp %[arg0], %[arg1], [sp]\n\t"
- "stp %[arg2], %[code], [sp, #16]\n\t"
- "str %[self], [sp, #32]\n\t"
-
- // Now we definitely have x0-x3 free, use it to garble d8 - d15
- "movk x0, #0xfad0\n\t"
- "movk x0, #0xebad, lsl #16\n\t"
- "movk x0, #0xfad0, lsl #32\n\t"
- "movk x0, #0xebad, lsl #48\n\t"
- "fmov d8, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d9, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d10, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d11, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d12, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d13, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d14, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d15, x0\n\t"
-
- // Load call params into the right registers.
- "ldp x0, x1, [sp]\n\t"
- "ldp x2, x3, [sp, #16]\n\t"
- "ldr x19, [sp, #32]\n\t"
- "add sp, sp, #48\n\t"
- ".cfi_adjust_cfa_offset -48\n\t"
-
-
- "blr x3\n\t" // Call the stub
- "mov x8, x0\n\t" // Store result
- "add sp, sp, #16\n\t" // Drop the quick "frame"
- ".cfi_adjust_cfa_offset -16\n\t"
-
- // Test d8 - d15. We can use x1 and x2.
- "movk x1, #0xfad0\n\t"
- "movk x1, #0xebad, lsl #16\n\t"
- "movk x1, #0xfad0, lsl #32\n\t"
- "movk x1, #0xebad, lsl #48\n\t"
- "fmov x2, d8\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d9\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d10\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d11\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d12\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d13\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d14\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d15\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
-
- "mov x9, #0\n\t" // Use x9 as flag, in clobber list
-
- // Finish up.
- "2:\n\t"
- "ldp x0, x1, [sp]\n\t" // Restore stuff not named clobbered, may contain fpr_result
- "ldp x2, x3, [sp, #16]\n\t"
- "ldp x4, x5, [sp, #32]\n\t"
- "ldp x6, x7, [sp, #48]\n\t"
- "add sp, sp, #64\n\t" // Free stack space, now sp as on entry
- ".cfi_adjust_cfa_offset -64\n\t"
-
- "str x9, %[fpr_result]\n\t" // Store the FPR comparison result
- "mov %[result], x8\n\t" // Store the call result
-
- "b 3f\n\t" // Goto end
-
- // Failed fpr verification.
- "1:\n\t"
- "mov x9, #1\n\t"
- "b 2b\n\t" // Goto finish-up
-
- // End
- "3:\n\t"
- : [result] "=r" (result)
- // Use the result from r0
- : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
- "memory"); // clobber.
-#elif defined(__mips__) && !defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
- "addiu $sp, $sp, -64\n\t"
- "sw $a0, 0($sp)\n\t"
- "sw $a1, 4($sp)\n\t"
- "sw $a2, 8($sp)\n\t"
- "sw $a3, 12($sp)\n\t"
- "sw $t0, 16($sp)\n\t"
- "sw $t1, 20($sp)\n\t"
- "sw $t2, 24($sp)\n\t"
- "sw $t3, 28($sp)\n\t"
- "sw $t4, 32($sp)\n\t"
- "sw $t5, 36($sp)\n\t"
- "sw $t6, 40($sp)\n\t"
- "sw $t7, 44($sp)\n\t"
- // Spill gp register since it is caller save.
- "sw $gp, 52($sp)\n\t"
-
- "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sw %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "addiu $sp, $sp, -20\n\t"
- "sw %[arg0], 0($sp)\n\t"
- "sw %[arg1], 4($sp)\n\t"
- "sw %[arg2], 8($sp)\n\t"
- "sw %[code], 12($sp)\n\t"
- "sw %[self], 16($sp)\n\t"
-
- // Load call params into the right registers.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $t9, 12($sp)\n\t"
- "lw $s1, 16($sp)\n\t"
- "addiu $sp, $sp, 20\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $a3, 12($sp)\n\t"
- "lw $t0, 16($sp)\n\t"
- "lw $t1, 20($sp)\n\t"
- "lw $t2, 24($sp)\n\t"
- "lw $t3, 28($sp)\n\t"
- "lw $t4, 32($sp)\n\t"
- "lw $t5, 36($sp)\n\t"
- "lw $t6, 40($sp)\n\t"
- "lw $t7, 44($sp)\n\t"
- // Restore gp.
- "lw $gp, 52($sp)\n\t"
- "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
- "fp", "ra",
- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
- "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
- "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
- "memory"); // clobber.
-#elif defined(__mips__) && defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a7 which we say we don't clobber. May contain args.
- "daddiu $sp, $sp, -64\n\t"
- "sd $a0, 0($sp)\n\t"
- "sd $a1, 8($sp)\n\t"
- "sd $a2, 16($sp)\n\t"
- "sd $a3, 24($sp)\n\t"
- "sd $a4, 32($sp)\n\t"
- "sd $a5, 40($sp)\n\t"
- "sd $a6, 48($sp)\n\t"
- "sd $a7, 56($sp)\n\t"
-
- "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sd %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "daddiu $sp, $sp, -40\n\t"
- "sd %[arg0], 0($sp)\n\t"
- "sd %[arg1], 8($sp)\n\t"
- "sd %[arg2], 16($sp)\n\t"
- "sd %[code], 24($sp)\n\t"
- "sd %[self], 32($sp)\n\t"
-
- // Load call params into the right registers.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $t9, 24($sp)\n\t"
- "ld $s1, 32($sp)\n\t"
- "daddiu $sp, $sp, 40\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $a3, 24($sp)\n\t"
- "ld $a4, 32($sp)\n\t"
- "ld $a5, 40($sp)\n\t"
- "ld $a6, 48($sp)\n\t"
- "ld $a7, 56($sp)\n\t"
- "daddiu $sp, $sp, 64\n\t"
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "fp", "ra",
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
- "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
- "f27", "f28", "f29", "f30", "f31",
- "memory"); // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
- // Note: Uses the native convention
- // TODO: Set the thread?
- __asm__ __volatile__(
- "pushq %[referrer]\n\t" // Push referrer
- "pushq (%%rsp)\n\t" // & 16B alignment padding
- ".cfi_adjust_cfa_offset 16\n\t"
- "call *%%rax\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop null and padding
- ".cfi_adjust_cfa_offset -16\n\t"
- : "=a" (result)
- // Use the result from rax
- : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "c"(referrer)
- // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
- : "rbx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "memory"); // clobber all
- // TODO: Should we clobber the other registers?
-#else
- UNUSED(arg0, arg1, arg2, code, referrer);
- LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
- result = 0;
-#endif
- // Pop transition.
- self->PopManagedStackFragment(fragment);
-
- fp_result = fpr_result;
- EXPECT_EQ(0U, fp_result);
-
- return result;
+ return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
}
// TODO: Set up a frame according to referrer's specs.
@@ -429,19 +85,55 @@ class StubTest : public CommonRuntimeTest {
size_t fpr_result = 0;
#if defined(__i386__)
// TODO: Set the thread?
+#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
+#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
__asm__ __volatile__(
- "movd %[hidden], %%xmm7\n\t"
- "subl $12, %%esp\n\t" // Align stack.
- "pushl %[referrer]\n\t" // Store referrer
+ "movd %[hidden], %%xmm7\n\t" // This is a memory op, so do this early. If it is off of
+ // esp, then we won't be able to access it after spilling.
+
+ // Spill 6 registers.
+ PUSH(%%ebx)
+ PUSH(%%ecx)
+ PUSH(%%edx)
+ PUSH(%%esi)
+ PUSH(%%edi)
+ PUSH(%%ebp)
+
+      // Store the inputs on the stack, but keep the referrer up top; that is less work.
+ PUSH(%[referrer]) // Align stack.
+ PUSH(%[referrer]) // Store referrer
+
+ PUSH(%[arg0])
+ PUSH(%[arg1])
+ PUSH(%[arg2])
+ PUSH(%[code])
+ // Now read them back into the required registers.
+ POP(%%edi)
+ POP(%%edx)
+ POP(%%ecx)
+ POP(%%eax)
+ // Call is prepared now.
+
"call *%%edi\n\t" // Call the stub
- "addl $16, %%esp" // Pop referrer
+ "addl $8, %%esp\n\t" // Pop referrer and padding.
+ ".cfi_adjust_cfa_offset -8\n\t"
+
+ // Restore 6 registers.
+ POP(%%ebp)
+ POP(%%edi)
+ POP(%%esi)
+ POP(%%edx)
+ POP(%%ecx)
+ POP(%%ebx)
+
: "=a" (result)
// Use the result from eax
- : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer), [hidden]"m"(hidden)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
+ [referrer]"r"(referrer), [hidden]"m"(hidden)
// This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
- : "memory"); // clobber.
- // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
- // but compilation fails when declaring that.
+ : "memory", "xmm7"); // clobber.
+#undef PUSH
+#undef POP
#elif defined(__arm__)
__asm__ __volatile__(
"push {r1-r12, lr}\n\t" // Save state, 13*4B = 52B
@@ -743,23 +435,72 @@ class StubTest : public CommonRuntimeTest {
"f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
"f27", "f28", "f29", "f30", "f31",
"memory"); // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
- // Note: Uses the native convention
+#elif defined(__x86_64__) && !defined(__APPLE__)
+#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
+#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
+  // Note: Uses the native convention. We emulate callee-save behavior by manually spilling
+  // and restoring almost all registers.
// TODO: Set the thread?
__asm__ __volatile__(
- "pushq %[referrer]\n\t" // Push referrer
- "pushq (%%rsp)\n\t" // & 16B alignment padding
- ".cfi_adjust_cfa_offset 16\n\t"
- "call *%%rbx\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop null and padding
+ // Spill almost everything (except rax, rsp). 14 registers.
+ PUSH(%%rbx)
+ PUSH(%%rcx)
+ PUSH(%%rdx)
+ PUSH(%%rsi)
+ PUSH(%%rdi)
+ PUSH(%%rbp)
+ PUSH(%%r8)
+ PUSH(%%r9)
+ PUSH(%%r10)
+ PUSH(%%r11)
+ PUSH(%%r12)
+ PUSH(%%r13)
+ PUSH(%%r14)
+ PUSH(%%r15)
+
+ PUSH(%[referrer]) // Push referrer & 16B alignment padding
+ PUSH(%[referrer])
+
+ // Now juggle the input registers.
+ PUSH(%[arg0])
+ PUSH(%[arg1])
+ PUSH(%[arg2])
+ PUSH(%[hidden])
+ PUSH(%[code])
+ POP(%%r8)
+ POP(%%rax)
+ POP(%%rdx)
+ POP(%%rsi)
+ POP(%%rdi)
+
+ "call *%%r8\n\t" // Call the stub
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
+
+ POP(%%r15)
+ POP(%%r14)
+ POP(%%r13)
+ POP(%%r12)
+ POP(%%r11)
+ POP(%%r10)
+ POP(%%r9)
+ POP(%%r8)
+ POP(%%rbp)
+ POP(%%rdi)
+ POP(%%rsi)
+ POP(%%rdx)
+ POP(%%rcx)
+ POP(%%rbx)
+
: "=a" (result)
// Use the result from rax
- : "D"(arg0), "S"(arg1), "d"(arg2), "b"(code), [referrer] "c"(referrer), [hidden] "a"(hidden)
- // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
- : "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "memory"); // clobber all
- // TODO: Should we clobber the other registers?
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
+ [referrer] "r"(referrer), [hidden] "r"(hidden)
+ // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
+ // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
+ : "memory"); // We spill and restore (almost) all registers, so only mention memory here.
+#undef PUSH
+#undef POP
#else
UNUSED(arg0, arg1, arg2, code, referrer, hidden);
LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 987ad60fd8..0d88dd0dc5 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,9 +16,10 @@
#include "context_x86.h"
-#include "art_method-inl.h"
+#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "stack.h"
namespace art {
namespace x86 {
@@ -37,8 +38,8 @@ void X86Context::Reset() {
}
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 3dc7d71df4..12c94bc598 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,9 +16,10 @@
#include "context_x86_64.h"
-#include "art_method-inl.h"
+#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "stack.h"
namespace art {
namespace x86_64 {
@@ -37,8 +38,8 @@ void X86_64Context::Reset() {
}
void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/art_code.cc b/runtime/art_code.cc
new file mode 100644
index 0000000000..ad0b170079
--- /dev/null
+++ b/runtime/art_code.cc
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_code.h"
+
+#include "art_method.h"
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "handle_scope.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mapping_table.h"
+#include "oat.h"
+#include "runtime.h"
+#include "utils.h"
+
+namespace art {
+
+// Converts a dex PC to a native PC.
+uintptr_t ArtCode::ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ if (IsOptimized(sizeof(void*))) {
+ // Optimized code does not have a mapping table. Search for the dex-to-pc
+ // mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // All stack maps are stored in the same CodeItem section, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ }
+ } else {
+ MappingTable table((entry_point != nullptr) ? GetMappingTable(sizeof(void*)) : nullptr);
+ if (table.TotalSize() == 0) {
+ DCHECK_EQ(dex_pc, 0U);
+ return 0; // Special no mapping/pc == 0 case
+ }
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ }
+
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(method_);
+ }
+ return UINTPTR_MAX;
+}
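
A hypothetical call site for the conversion above, for example when resolving
where to resume in a catch handler (the method and handler_dex_pc names are
illustrative):

ArtCode code(method);  // Wrap the catching method.
uintptr_t resume_pc = code.ToNativeQuickPc(handler_dex_pc,
                                           /* is_for_catch_handler */ true);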
+
+bool ArtCode::IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Temporary solution for detecting if a method has been optimized: the compiler
+ // does not create a GC map. Instead, the vmap table contains the stack map
+ // (as in stack_map.h).
+ return !method_->IsNative()
+ && method_->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+ && GetQuickOatEntryPoint(pointer_size) != nullptr
+ && GetNativeGcMap(pointer_size) == nullptr;
+}
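
A minimal restatement of the temporary detection scheme above, with the queries
reduced to booleans (a sketch, not the real signature):

bool IsOptimizedHeuristic(bool is_native, bool has_quick_code, bool has_gc_map) {
  // Optimized code keeps its stack maps in the vmap table and emits no GC map,
  // so "has quick code but no GC map" identifies the optimizing compiler's output.
  return !is_native && has_quick_code && !has_gc_map;
}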
+
+CodeInfo ArtCode::GetOptimizedCodeInfo() {
+ DCHECK(IsOptimized(sizeof(void*)));
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(sizeof(void*)));
+ DCHECK(code_pointer != nullptr);
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+ const void* data =
+ reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
+ return CodeInfo(data);
+}
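
The [-1] indexing above relies on the OatQuickMethodHeader being laid out
immediately before the first instruction of the compiled code. A simplified
sketch of that layout trick (field names abbreviated, not the real header):

struct Header {
  uint32_t vmap_table_offset;  // Distance back from the code start to the table.
};

const Header* GetHeader(const void* code_start) {
  // A Header* positioned at the code start, indexed by -1, lands on the header
  // the compiler placed directly before the code.
  return reinterpret_cast<const Header*>(code_start) - 1;
}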
+
+uintptr_t ArtCode::NativeQuickPcOffset(const uintptr_t pc) {
+ const void* quick_entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
+ CHECK_EQ(quick_entry_point,
+ Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*)));
+ return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+}
+
+uint32_t ArtCode::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+ if (IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ if (stack_map.IsValid()) {
+ return stack_map.GetDexPc(encoding);
+ }
+ } else {
+ MappingTable table(entry_point != nullptr ? GetMappingTable(sizeof(void*)) : nullptr);
+ if (table.TotalSize() == 0) {
+ // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+ // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+ DCHECK(method_->IsNative() || method_->IsCalleeSaveMethod() || method_->IsProxyMethod())
+ << PrettyMethod(method_);
+ return DexFile::kDexNoIndex; // Special no mapping case
+ }
+ // Assume the caller wants a pc-to-dex mapping so check here first.
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ // Now check dex-to-pc mappings.
+ typedef MappingTable::DexToPcIterator It2;
+ for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ }
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
+ << ") in " << PrettyMethod(method_);
+ }
+ return DexFile::kDexNoIndex;
+}
+
+const uint8_t* ArtCode::GetNativeGcMap(size_t pointer_size) {
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetVmapTable(size_t pointer_size) {
+ CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetMappingTable(size_t pointer_size) {
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+// Counts the number of references in the parameter list of the corresponding method.
+// Note: This does _not_ include "this" for non-static methods.
+static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t shorty_len;
+ const char* shorty = method->GetShorty(&shorty_len);
+ uint32_t refs = 0;
+ for (uint32_t i = 1; i < shorty_len ; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ return refs;
+}
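
Worked example for the loop above, using the shorty encoding (return type at
index 0, one character per parameter, 'L' for any reference type):

// A hypothetical method  String m(String s, int i, Object o)  has shorty "LLIL".
// Index 0 ('L') is the return type and is skipped; indices 1..3 hold 'L', 'I',
// 'L', so the function returns 2. The +1 for the receiver is added separately
// by the generic JNI caller below.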
+
+QuickMethodFrameInfo ArtCode::GetQuickFrameInfo() {
+ Runtime* runtime = Runtime::Current();
+
+ if (UNLIKELY(method_->IsAbstract())) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+
+ // This goes before IsProxyMethod since runtime methods have a null declaring class.
+ if (UNLIKELY(method_->IsRuntimeMethod())) {
+ return runtime->GetRuntimeMethodFrameInfo(method_);
+ }
+
+  // For proxy methods we add special handling for the direct method case (there is only one
+  // direct method: the constructor). The direct method is cloned from the original
+  // java.lang.reflect.Proxy class together with its code, so it executes as a usual quick
+  // compiled method without any stubs. Hence the frame info should be returned as for a
+  // quick method, not a stub. However, if instrumentation stubs are installed,
+  // instrumentation->GetQuickCodeFor() returns artQuickProxyInvokeHandler instead of an
+  // oat code pointer, so we have to add a special case here.
+ if (UNLIKELY(method_->IsProxyMethod())) {
+ if (method_->IsDirect()) {
+ CHECK(method_->IsConstructor());
+ const void* code_pointer =
+ EntryPointToCodePointer(method_->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+ } else {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ }
+
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*));
+ ClassLinker* class_linker = runtime->GetClassLinker();
+  // On failure, instead of null we get the quick-generic-jni-trampoline for a native method
+  // (indicating generic JNI), or the quick-to-interpreter-bridge (but not the trampoline)
+  // for non-native methods. And we really shouldn't see a failure for non-native methods here.
+ DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
+
+ if (class_linker->IsQuickGenericJniStub(entry_point)) {
+ // Generic JNI frame.
+ DCHECK(method_->IsNative());
+ uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method_) + 1;
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+ // Callee saves + handle scope + method ref + alignment
+ // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
+ sizeof(ArtMethod*) + scope_size, kStackAlignment);
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+ }
+
+ const void* code_pointer = EntryPointToCodePointer(entry_point);
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+}
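
A worked instance of the generic JNI frame computation above, under assumed
numbers (64-bit target, kStackAlignment = 16, callee-save frame = 96 bytes,
handle scope = 40 bytes; all values illustrative):

//   frame = RoundUp(callee_frame - sizeof(void*) + sizeof(ArtMethod*) + scope, 16)
//         = RoundUp(96 - 8 + 8 + 40, 16)
//         = RoundUp(136, 16) = 144 bytes
// The -sizeof(void*) and +sizeof(ArtMethod*) cancel on 64-bit targets because
// the callee-save frame already reserves one method-pointer slot.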
+
+void ArtCode::AssertPcIsWithinQuickCode(uintptr_t pc) {
+ if (method_->IsNative() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+ return;
+ }
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ return;
+ }
+ const void* code = method_->GetEntryPointFromQuickCompiledCode();
+ if (code == GetQuickInstrumentationEntryPoint()) {
+ return;
+ }
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickResolutionStub(code)) {
+ return;
+ }
+ // If we are the JIT then we may have just compiled the method after the
+ // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
+
+ uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
+ EntryPointToCodePointer(code))[-1].code_size_;
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
+ CHECK(code_start <= pc && pc <= (code_start + code_size))
+ << PrettyMethod(method_)
+ << " pc=" << std::hex << pc
+ << " code=" << code
+ << " size=" << code_size;
+}
+
+bool ArtCode::PcIsWithinQuickCode(uintptr_t pc) {
+ /*
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
+ * return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+ */
+ uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+ method_->GetEntryPointFromQuickCompiledCode()));
+ if (code == 0) {
+ return pc == 0;
+ }
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return code <= pc && pc <= (code + code_size);
+}
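
A sketch of why the upper bound above is inclusive: if a method ends in a call
to a helper that never returns, the pushed return PC equals code + code_size,
one byte past the end, and a stack walk must still attribute it to this method.

bool PcInRange(uintptr_t code, uint32_t code_size, uintptr_t pc) {
  return code <= pc && pc <= code + code_size;  // Note <=, not <, on the right.
}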
+
+const void* ArtCode::GetQuickOatEntryPoint(size_t pointer_size) {
+ if (method_->IsAbstract() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(method_, pointer_size);
+  // On failure, instead of null we get the quick-generic-jni-trampoline for a native method
+  // (indicating generic JNI), or the quick-to-interpreter-bridge (but not the trampoline)
+  // for non-native methods.
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickGenericJniStub(code)) {
+ return nullptr;
+ }
+ return code;
+}
+
+} // namespace art
diff --git a/runtime/art_code.h b/runtime/art_code.h
new file mode 100644
index 0000000000..1d2d898ed6
--- /dev/null
+++ b/runtime/art_code.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ART_CODE_H_
+#define ART_RUNTIME_ART_CODE_H_
+
+#include "base/mutex.h"
+#include "offsets.h"
+#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
+
+namespace art {
+
+class ArtMethod;
+
+class ArtCode FINAL {
+ public:
+ explicit ArtCode(ArtMethod** method) : method_(*method) {}
+ explicit ArtCode(ArtMethod* method) : method_(method) {}
+ ArtCode() : method_(nullptr) {}
+
+ // Converts a dex PC to a native PC.
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure = true)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Converts a native PC to a dex PC.
+ uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
+ const uint8_t* GetNativeGcMap(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const uint8_t* GetVmapTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const uint8_t* GetMappingTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return FrameOffset(GetFrameSizeInBytes() - sizeof(void*));
+ }
+
+ template <bool kCheckFrameSize = true>
+ uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
+ if (kCheckFrameSize) {
+ DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ }
+ return result;
+ }
+
+ const void* GetQuickOatEntryPoint(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool PcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
+ DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
+ return FrameOffset(handle_scope_offset);
+ }
+
+ ArtMethod* GetMethod() const { return method_; }
+
+ private:
+ ArtMethod* method_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ART_CODE_H_
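
A hypothetical stack-walk snippet using the new wrapper (method and pc are
illustrative): callers construct an ArtCode around the method and query it for
code-related facts instead of asking ArtMethod directly.

ArtCode code(method);
if (code.PcIsWithinQuickCode(pc)) {
  uint32_t dex_pc = code.ToDexPc(pc);  // Native PC -> dex PC.
  QuickMethodFrameInfo info = code.GetQuickFrameInfo();
  // Walk to the caller using info.FrameSizeInBytes() ...
}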
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 632a50f15c..f741732046 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -212,18 +212,6 @@ inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx,
return type;
}
-inline uint32_t ArtMethod::GetCodeSize() {
- DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
-}
-
-inline uint32_t ArtMethod::GetCodeSize(const void* code) {
- if (code == nullptr) {
- return 0u;
- }
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
-}
-
inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
switch (type) {
case kStatic:
@@ -231,8 +219,9 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
case kDirect:
return !IsDirect() || IsStatic();
case kVirtual: {
+ // We have an error if we are direct or a non-default, non-miranda interface method.
mirror::Class* methods_class = GetDeclaringClass();
- return IsDirect() || (methods_class->IsInterface() && !IsMiranda());
+ return IsDirect() || (methods_class->IsInterface() && !IsDefault() && !IsMiranda());
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
@@ -248,85 +237,6 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
}
}
-inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
- DCHECK(!Runtime::Current()->IsStarted());
- return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
-}
-
-inline void ArtMethod::SetQuickOatCodeOffset(uint32_t code_offset) {
- DCHECK(!Runtime::Current()->IsStarted());
- SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(code_offset));
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetMappingTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer, size_t pointer_size) {
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetVmapTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer, size_t pointer_size) {
- CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized(sizeof(void*)));
- const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- const void* data =
- reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- return CodeInfo(data);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetNativeGcMap(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(const void* code_pointer, size_t pointer_size) {
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
inline bool ArtMethod::IsRuntimeMethod() {
return dex_method_index_ == DexFile::kDexNoIndex;
}
@@ -367,20 +277,6 @@ inline bool ArtMethod::IsImtUnimplementedMethod() {
return result;
}
-inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(
- this, sizeof(void*));
- return pc - reinterpret_cast<uintptr_t>(code);
-}
-
-inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
- DCHECK(code_pointer != nullptr);
- if (kIsDebugBuild && !IsProxyMethod()) {
- CHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
- }
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
-}
-
inline const DexFile* ArtMethod::GetDexFile() {
return GetDexCache()->GetDexFile();
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 92648b9b1b..f9d9077261 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -180,98 +180,6 @@ uint32_t ArtMethod::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfil
return DexFile::kDexNoIndex;
}
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
- if (IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
- if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ?
- GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
- // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
- DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
- return DexFile::kDexNoIndex; // Special no mapping case
- }
- // Assume the caller wants a pc-to-dex mapping so check here first.
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- // Now check dex-to-pc mappings.
- typedef MappingTable::DexToPcIterator It2;
- for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- }
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
- << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
- << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
- << ") in " << PrettyMethod(this);
- }
- return DexFile::kDexNoIndex;
-}
-
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- if (IsOptimized(sizeof(void*))) {
- // Optimized code does not have a mapping table. Search for the dex-to-pc
- // mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
-
- // All stack maps are stored in the same CodeItem section, safepoint stack
- // maps first, then catch stack maps. We use `is_for_catch_handler` to select
- // the order of iteration.
- StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
- if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ?
- GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- }
-
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(this);
- }
- return UINTPTR_MAX;
-}
-
uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
uint32_t dex_pc, bool* has_no_move_exception) {
const DexFile::CodeItem* code_item = GetCodeItem();
@@ -322,76 +230,6 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
return found_dex_pc;
}
-void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
- if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
- return;
- }
- if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
- return;
- }
- const void* code = GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
- return;
- }
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickResolutionStub(code)) {
- return;
- }
- // If we are the JIT then we may have just compiled the method after the
- // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
- return;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
- << PrettyMethod(this)
- << " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << GetCodeSize(
- EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
-}
-
-bool ArtMethod::IsEntrypointInterpreter() {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
- return oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode();
-}
-
-const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
- if (IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods.
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickGenericJniStub(code)) {
- return nullptr;
- }
- return code;
-}
-
-#ifndef NDEBUG
-uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
- CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point,
- Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-#endif
-
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -435,8 +273,9 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- DCHECK(!runtime->UseJit());
- CHECK(IsEntrypointInterpreter())
+ CHECK(!runtime->UseJit());
+ const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this);
+ CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
@@ -480,74 +319,6 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
self->PopManagedStackFragment(fragment);
}
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: Thus does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t shorty_len;
- const char* shorty = method->GetShorty(&shorty_len);
- uint32_t refs = 0;
- for (uint32_t i = 1; i < shorty_len ; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- return refs;
-}
-
-QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
- Runtime* runtime = Runtime::Current();
-
- if (UNLIKELY(IsAbstract())) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
-
- // This goes before IsProxyMethod since runtime methods have a null declaring class.
- if (UNLIKELY(IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(this);
- }
-
- // For Proxy method we add special handling for the direct method case (there is only one
- // direct method - constructor). Direct method is cloned from original
- // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
- // quick compiled method without any stubs. So the frame info should be returned as it is a
- // quick method not a stub. However, if instrumentation stubs are installed, the
- // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
- // oat code pointer, thus we have to add a special case here.
- if (UNLIKELY(IsProxyMethod())) {
- if (IsDirect()) {
- CHECK(IsConstructor());
- return GetQuickFrameInfo(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
- }
-
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
- ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods. And we really shouldn't see a failure for non-native methods here.
- DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
-
- if (class_linker->IsQuickGenericJniStub(entry_point)) {
- // Generic JNI frame.
- DCHECK(IsNative());
- uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(this) + 1;
- size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
- // Callee saves + handle scope + method ref + alignment
- // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
- sizeof(ArtMethod*) + scope_size, kStackAlignment);
- return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
- }
-
- const void* code_pointer = EntryPointToCodePointer(entry_point);
- return GetQuickFrameInfo(code_pointer);
-}
-
void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
CHECK(IsNative()) << PrettyMethod(this);
CHECK(!IsFastNative()) << PrettyMethod(this);
@@ -590,16 +361,6 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
-const uint8_t* ArtMethod::GetQuickenedInfo() {
- bool found = false;
- OatFile::OatMethod oat_method =
- Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
- if (!found || (oat_method.GetQuickCode() != nullptr)) {
- return nullptr;
- }
- return oat_method.GetVmapTable();
-}
-
ProfilingInfo* ArtMethod::CreateProfilingInfo() {
DCHECK(!Runtime::Current()->IsAotCompiler());
ProfilingInfo* info = ProfilingInfo::Create(this);
@@ -613,4 +374,14 @@ ProfilingInfo* ArtMethod::CreateProfilingInfo() {
}
}
+const uint8_t* ArtMethod::GetQuickenedInfo() {
+ bool found = false;
+ OatFile::OatMethod oat_method =
+ Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
+ if (!found || (oat_method.GetQuickCode() != nullptr)) {
+ return nullptr;
+ }
+ return oat_method.GetVmapTable();
+}
+
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 0315c3a953..9743250cc0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_ART_METHOD_H_
#define ART_RUNTIME_ART_METHOD_H_
+#include "base/bit_utils.h"
#include "base/casts.h"
#include "dex_file.h"
#include "gc_root.h"
@@ -24,10 +25,8 @@
#include "method_reference.h"
#include "modifiers.h"
#include "mirror/object.h"
-#include "quick/quick_method_frame_info.h"
#include "read_barrier_option.h"
#include "stack.h"
-#include "stack_map.h"
#include "utils.h"
namespace art {
@@ -136,6 +135,11 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccMiranda) != 0;
}
+ // This is set by the class linker.
+ bool IsDefault() {
+ return (GetAccessFlags() & kAccDefault) != 0;
+ }
+
bool IsNative() {
return (GetAccessFlags() & kAccNative) != 0;
}
@@ -164,14 +168,9 @@ class ArtMethod FINAL {
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
- bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
- // Temporary solution for detecting if a method has been optimized: the compiler
- // does not create a GC map. Instead, the vmap table contains the stack map
- // (as in stack_map.h).
- return !IsNative()
- && GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
- && GetQuickOatCodePointer(pointer_size) != nullptr
- && GetNativeGcMap(pointer_size) == nullptr;
+ // Returns true if this method could be overridden by a default method.
+ bool IsOverridableByDefaultMethod() {
+ return IsDefault() || IsAbstract();
}
bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -280,94 +279,6 @@ class ArtMethod FINAL {
entry_point_from_quick_compiled_code, pointer_size);
}
- uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Check whether the given PC is within the quick compiled code associated with this method's
- // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
- // debug purposes.
- bool PcIsWithinQuickCode(uintptr_t pc) {
- return PcIsWithinQuickCode(
- reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
- }
-
- void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Returns true if the entrypoint points to the interpreter, as
- // opposed to the compiled code, that is, this method will be
- // interpretered on invocation.
- bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_);
-
- uint32_t GetQuickOatCodeOffset();
- void SetQuickOatCodeOffset(uint32_t code_offset);
-
- ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
- uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
- // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
- // least 2 byte aligned.
- code &= ~0x1;
- return reinterpret_cast<const void*>(code);
- }
-
- // Actual entry point pointer to compiled oat code or null.
- const void* GetQuickOatEntryPoint(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- // Actual pointer to compiled oat code or null.
- const void* GetQuickOatCodePointer(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- }
-
- // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
- const uint8_t* GetMappingTable(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
- const uint8_t* GetVmapTable(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
- const uint8_t* GetNativeGcMap(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
- if (kCheckFrameSize) {
- DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
- }
- return result;
- }
-
- QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
- QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetReturnPcOffset(GetFrameSizeInBytes());
- }
-
- FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
- return FrameOffset(frame_size_in_bytes - sizeof(void*));
- }
-
- FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
- DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
- return FrameOffset(handle_scope_offset);
- }
-
void RegisterNative(const void* native_method, bool is_fast)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -429,27 +340,6 @@ class ArtMethod FINAL {
bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-#ifdef NDEBUG
- uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
- }
-#else
- uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_REQUIRES(Locks::mutator_lock_);
-#endif
-
- // Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
return MethodReference(GetDexFile(), GetDexMethodIndex());
}
@@ -542,6 +432,8 @@ class ArtMethod FINAL {
return ++hotness_count_;
}
+ const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
@@ -622,24 +514,6 @@ class ArtMethod FINAL {
}
}
- // Code points to the start of the quick code.
- static uint32_t GetCodeSize(const void* code);
-
- static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
- if (code == 0) {
- return pc == 0;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- return code <= pc && pc <= code + GetCodeSize(
- EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
- }
-
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 7aa71f9c3e..1704688565 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -93,6 +93,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"StackMapStm ",
"CodeGen ",
"ParallelMove ",
+ "GraphChecker ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 47cd8b5d29..004895ab42 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -105,6 +105,7 @@ enum ArenaAllocKind {
kArenaAllocStackMapStream,
kArenaAllocCodeGenerator,
kArenaAllocParallelMoveResolver,
+ kArenaAllocGraphChecker,
kNumArenaAllocKinds
};
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b2c567760f..70bd398415 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -50,6 +50,8 @@ Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_count_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
@@ -940,6 +942,8 @@ void Locks::Init() {
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(oat_file_manager_lock_ != nullptr);
+ DCHECK(oat_file_count_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -1028,6 +1032,14 @@ void Locks::Init() {
modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
}
+ UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
+ DCHECK(oat_file_manager_lock_ == nullptr);
+ oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kOatFileCountLock);
+ DCHECK(oat_file_count_lock_ == nullptr);
+ oat_file_count_lock_ = new ReaderWriterMutex("OatFile count lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3da806b54d..d4c9057ab3 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -83,6 +83,8 @@ enum LockLevel {
kDexFileToMethodInlinerMapLock,
kInternTableLock,
kOatFileSecondaryLookupLock,
+ kOatFileCountLock,
+ kOatFileManagerLock,
kTracingUniqueMethodsLock,
kTracingStreamingLock,
kDefaultMutexLevel,
@@ -644,8 +646,14 @@ class Locks {
// Guards modification of the LDT on x86.
static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+ // Guards opened oat files in OatFileManager.
+ static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
+  // Guards the count of opened oat files in OatFileManager.
+ static ReaderWriterMutex* oat_file_count_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
+
// Guards intern table.
- static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+ static Mutex* intern_table_lock_ ACQUIRED_AFTER(oat_file_count_lock_);
// Guards reference processor.
static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
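The ACQUIRED_AFTER chain above encodes a total acquisition order for these
locks; a sketch of a compliant critical section (names from this header, the
usage itself is illustrative):

void WithBothOatLocks(Thread* self) {
  // Acquire in the annotated order: manager lock first, then count lock.
  ReaderMutexLock manager(self, *Locks::oat_file_manager_lock_);
  ReaderMutexLock count(self, *Locks::oat_file_count_lock_);
  // ... read the oat file collections; destructors release in reverse order.
}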
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 7965cd78bc..e8973511e3 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#include "art_code.h"
#include "art_method-inl.h"
#include "gc_map.h"
#include "scoped_thread_state_change.h"
@@ -53,7 +54,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (GetMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -64,7 +65,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = m->GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -108,7 +109,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
+ NativePcOffsetToReferenceMap map(GetCurrentCode().GetNativeGcMap(sizeof(void*)));
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
CHECK(ref_bitmap);
for (int i = 0; i < number_of_references; ++i) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index acb39c5402..4ce52f10f3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -16,12 +16,15 @@
#include "class_linker.h"
+#include <algorithm>
#include <deque>
#include <iostream>
#include <memory>
#include <queue>
#include <string>
+#include <tuple>
#include <unistd.h>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -41,24 +44,20 @@
#include "compiler_callbacks.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "leb128.h"
#include "linear_alloc.h"
-#include "oat.h"
-#include "oat_file.h"
-#include "oat_file-inl.h"
-#include "oat_file_assistant.h"
-#include "object_lock.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -72,12 +71,17 @@
#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
+#include "native/dalvik_system_DexFile.h"
+#include "oat.h"
+#include "oat_file.h"
+#include "oat_file-inl.h"
+#include "oat_file_assistant.h"
+#include "oat_file_manager.h"
+#include "object_lock.h"
#include "os.h"
#include "runtime.h"
-#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "trace.h"
#include "utils.h"
@@ -89,9 +93,6 @@ namespace art {
static constexpr bool kSanityCheckObjects = kIsDebugBuild;
-// For b/21333911.
-static constexpr bool kDuplicateClassesCheck = false;
-
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -696,343 +697,6 @@ void ClassLinker::RunRootClinits() {
}
}
-const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
- WriterMutexLock mu(Thread::Current(), dex_lock_);
- if (kIsDebugBuild) {
- for (size_t i = 0; i < oat_files_.size(); ++i) {
- CHECK_NE(oat_file, oat_files_[i]) << oat_file->GetLocation();
- }
- }
- VLOG(class_linker) << "Registering " << oat_file->GetLocation();
- oat_files_.push_back(oat_file);
- return oat_file;
-}
-
-OatFile& ClassLinker::GetImageOatFile(gc::space::ImageSpace* space) {
- VLOG(startup) << "ClassLinker::GetImageOatFile entering";
- OatFile* oat_file = space->ReleaseOatFile();
- CHECK_EQ(RegisterOatFile(oat_file), oat_file);
- VLOG(startup) << "ClassLinker::GetImageOatFile exiting";
- return *oat_file;
-}
-
-class DexFileAndClassPair : ValueObject {
- public:
- DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
- : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
- dex_file_(dex_file),
- current_class_index_(current_class_index),
- from_loaded_oat_(from_loaded_oat) {}
-
- DexFileAndClassPair(const DexFileAndClassPair&) = default;
-
- DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) {
- cached_descriptor_ = rhs.cached_descriptor_;
- dex_file_ = rhs.dex_file_;
- current_class_index_ = rhs.current_class_index_;
- from_loaded_oat_ = rhs.from_loaded_oat_;
- return *this;
- }
-
- const char* GetCachedDescriptor() const {
- return cached_descriptor_;
- }
-
- bool operator<(const DexFileAndClassPair& rhs) const {
- const char* lhsDescriptor = cached_descriptor_;
- const char* rhsDescriptor = rhs.cached_descriptor_;
- int cmp = strcmp(lhsDescriptor, rhsDescriptor);
- if (cmp != 0) {
- // Note that the order must be reversed. We want to iterate over the classes in dex files.
- // They are sorted lexicographically. Thus, the priority-queue must be a min-queue.
- return cmp > 0;
- }
- return dex_file_ < rhs.dex_file_;
- }
-
- bool DexFileHasMoreClasses() const {
- return current_class_index_ + 1 < dex_file_->NumClassDefs();
- }
-
- DexFileAndClassPair GetNext() const {
- return DexFileAndClassPair(dex_file_, current_class_index_ + 1, from_loaded_oat_);
- }
-
- size_t GetCurrentClassIndex() const {
- return current_class_index_;
- }
-
- bool FromLoadedOat() const {
- return from_loaded_oat_;
- }
-
- const DexFile* GetDexFile() const {
- return dex_file_;
- }
-
- void DeleteDexFile() {
- delete dex_file_;
- dex_file_ = nullptr;
- }
-
- private:
- static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
- return dex_file->StringByTypeIdx(class_def.class_idx_);
- }
-
- const char* cached_descriptor_;
- const DexFile* dex_file_;
- size_t current_class_index_;
- bool from_loaded_oat_; // We only need to compare mismatches between what we load now
- // and what was loaded before. Any old duplicates must have been
- // OK, and any new "internal" duplicates are as well (they must
- // be from multidex, which resolves correctly).
-};
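
The reversed operator< in the removed DexFileAndClassPair above is the whole trick: std::priority_queue is a max-heap by default, so inverting the descriptor comparison turns it into a min-queue that pops the lexicographically smallest descriptor first. A minimal standalone sketch of the idiom (hypothetical names; not part of this change):

#include <iostream>
#include <queue>
#include <string>

// Entry compared by descriptor only; operator< is reversed so the default
// max-heap std::priority_queue behaves as a min-queue over descriptors.
struct DescriptorEntry {
  std::string descriptor;
  bool operator<(const DescriptorEntry& rhs) const {
    return descriptor > rhs.descriptor;  // Reversed on purpose.
  }
};

int main() {
  std::priority_queue<DescriptorEntry> queue;
  queue.push({"LC;"});
  queue.push({"LA;"});
  queue.push({"LB;"});
  while (!queue.empty()) {
    std::cout << queue.top().descriptor << "\n";  // Prints LA; LB; LC;
    queue.pop();
  }
  return 0;
}
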
-
-static void AddDexFilesFromOat(const OatFile* oat_file,
- bool already_loaded,
- std::priority_queue<DexFileAndClassPair>* heap) {
- const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles();
- for (const OatDexFile* oat_dex_file : oat_dex_files) {
- std::string error;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
- if (dex_file.get() == nullptr) {
- LOG(WARNING) << "Could not create dex file from oat file: " << error;
- } else {
- if (dex_file->NumClassDefs() > 0U) {
- heap->emplace(dex_file.release(), 0U, already_loaded);
- }
- }
- }
-}
-
-static void AddNext(DexFileAndClassPair* original,
- std::priority_queue<DexFileAndClassPair>* heap) {
- if (original->DexFileHasMoreClasses()) {
- heap->push(original->GetNext());
- } else {
- // Need to delete the dex file.
- original->DeleteDexFile();
- }
-}
-
-static void FreeDexFilesInHeap(std::priority_queue<DexFileAndClassPair>* heap) {
- while (!heap->empty()) {
- delete heap->top().GetDexFile();
- heap->pop();
- }
-}
-
-const OatFile* ClassLinker::GetBootOatFile() {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- return nullptr;
- }
- return image_space->GetOatFile();
-}
-
-const OatFile* ClassLinker::GetPrimaryOatFile() {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- const OatFile* boot_oat_file = GetBootOatFile();
- if (boot_oat_file != nullptr) {
- for (const OatFile* oat_file : oat_files_) {
- if (oat_file != boot_oat_file) {
- return oat_file;
- }
- }
- }
- return nullptr;
-}
-
-// Check for class-def collisions in dex files.
-//
-// This works by maintaining a heap with one class from each dex file, sorted by the class
-// descriptor. Then a dex-file/class pair is continually removed from the heap and compared
-// against the following top element. If the descriptor is the same, it is now checked whether
-// the two elements agree on whether their dex file was from an already-loaded oat-file or the
-// new oat file. Any disagreement indicates a collision.
-bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) {
- if (!kDuplicateClassesCheck) {
- return false;
- }
-
- // Dex files are registered late - once a class is actually being loaded. We have to compare
- // against the open oat files. Take the dex_lock_ that protects oat_files_ accesses.
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
-
- std::priority_queue<DexFileAndClassPair> queue;
-
- // Add dex files from already loaded oat files, but skip boot.
- {
- const OatFile* boot_oat = GetBootOatFile();
- for (const OatFile* loaded_oat_file : oat_files_) {
- if (loaded_oat_file == boot_oat) {
- continue;
- }
- AddDexFilesFromOat(loaded_oat_file, true, &queue);
- }
- }
-
- if (queue.empty()) {
- // No other oat files, return early.
- return false;
- }
-
- // Add dex files from the oat file to check.
- AddDexFilesFromOat(oat_file, false, &queue);
-
- // Now drain the queue.
- while (!queue.empty()) {
- DexFileAndClassPair compare_pop = queue.top();
- queue.pop();
-
- // Compare against the following elements.
- while (!queue.empty()) {
- DexFileAndClassPair top = queue.top();
-
- if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
- // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
- if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
- *error_msg =
- StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
- compare_pop.GetCachedDescriptor(),
- compare_pop.GetDexFile()->GetLocation().c_str(),
- top.GetDexFile()->GetLocation().c_str());
- FreeDexFilesInHeap(&queue);
- return true;
- }
- // Pop it.
- queue.pop();
- AddNext(&top, &queue);
- } else {
- // Something else. Done here.
- break;
- }
- }
- AddNext(&compare_pop, &queue);
- }
-
- return false;
-}
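
For reference, the HasCollisions logic removed here drains the min-queue and compares each popped element against every following entry with the same descriptor, reporting a collision only when the two entries came from different sources. A hedged sketch of that drain loop over plain strings (Entry and its fields are illustrative assumptions):

#include <iostream>
#include <queue>
#include <string>

struct Entry {
  std::string descriptor;
  bool from_loaded;  // True if the entry came from an already-loaded source.
  bool operator<(const Entry& rhs) const {
    return descriptor > rhs.descriptor;  // Reversed: min-queue by descriptor.
  }
};

// Mirrors the collision rule: a duplicate descriptor is only an error when
// it crosses from an already-loaded source into the new one.
bool HasCrossSourceDuplicate(std::priority_queue<Entry> queue) {  // By value: we consume it.
  while (!queue.empty()) {
    Entry current = queue.top();
    queue.pop();
    while (!queue.empty() && queue.top().descriptor == current.descriptor) {
      if (queue.top().from_loaded != current.from_loaded) {
        return true;  // Same class descriptor from different sources.
      }
      queue.pop();
    }
  }
  return false;
}

int main() {
  std::priority_queue<Entry> queue;
  queue.push({"LFoo;", true});
  queue.push({"LBar;", false});
  queue.push({"LFoo;", false});
  std::cout << std::boolalpha << HasCrossSourceDuplicate(queue) << "\n";  // true
  return 0;
}
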
-
-std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
- const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs) {
- CHECK(error_msgs != nullptr);
-
- // Verify we aren't holding the mutator lock, which could starve GC if we
- // have to generate or relocate an oat file.
- Locks::mutator_lock_->AssertNotHeld(Thread::Current());
-
- OatFileAssistant oat_file_assistant(dex_location, oat_location, kRuntimeISA,
- !Runtime::Current()->IsAotCompiler());
-
- // Lock the target oat location to avoid races generating and loading the
- // oat file.
- std::string error_msg;
- if (!oat_file_assistant.Lock(&error_msg)) {
- // Don't worry too much if this fails. If it does fail, it's unlikely we
- // can generate an oat file anyway.
- VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
- }
-
- // Check if we already have an up-to-date oat file open.
- const OatFile* source_oat_file = nullptr;
- {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (const OatFile* oat_file : oat_files_) {
- CHECK(oat_file != nullptr);
- if (oat_file_assistant.GivenOatFileIsUpToDate(*oat_file)) {
- source_oat_file = oat_file;
- break;
- }
- }
- }
-
- // If we didn't have an up-to-date oat file open, try to load one from disk.
- if (source_oat_file == nullptr) {
- // Update the oat file on disk if we can. This may fail, but that's okay.
- // Best effort is all that matters here.
- if (!oat_file_assistant.MakeUpToDate(&error_msg)) {
- LOG(WARNING) << error_msg;
- }
-
- // Get the oat file on disk.
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- if (oat_file.get() != nullptr) {
- // Take the file only if it has no collisions, or we must take it because of preopting.
- bool accept_oat_file = !HasCollisions(oat_file.get(), &error_msg);
- if (!accept_oat_file) {
- // Failed the collision check. Print warning.
- if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- LOG(WARNING) << "Found duplicate classes, falling back to interpreter mode for "
- << dex_location;
- } else {
- LOG(WARNING) << "Found duplicate classes, dex-file-fallback disabled, will be failing to "
- " load classes for " << dex_location;
- }
- LOG(WARNING) << error_msg;
-
- // However, if the app was part of /system and preopted, there is no original dex file
- // available. In that case grudgingly accept the oat file.
- if (!DexFile::MaybeDex(dex_location)) {
- accept_oat_file = true;
- LOG(WARNING) << "Dex location " << dex_location << " does not seem to include dex file. "
- << "Allow oat file use. This is potentially dangerous.";
- }
- }
-
- if (accept_oat_file) {
- source_oat_file = oat_file.release();
- RegisterOatFile(source_oat_file);
- }
- }
- }
-
- std::vector<std::unique_ptr<const DexFile>> dex_files;
-
- // Load the dex files from the oat file.
- if (source_oat_file != nullptr) {
- dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
- if (dex_files.empty()) {
- error_msgs->push_back("Failed to open dex files from "
- + source_oat_file->GetLocation());
- }
- }
-
- // Fall back to running out of the original dex file if we couldn't load any
- // dex_files from the oat file.
- if (dex_files.empty()) {
- if (oat_file_assistant.HasOriginalDexFiles()) {
- if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- if (!DexFile::Open(dex_location, dex_location, &error_msg, &dex_files)) {
- LOG(WARNING) << error_msg;
- error_msgs->push_back("Failed to open dex files from " + std::string(dex_location));
- }
- } else {
- error_msgs->push_back("Fallback mode disabled, skipping dex files.");
- }
- } else {
- error_msgs->push_back("No original dex files found for dex location "
- + std::string(dex_location));
- }
- }
- return dex_files;
-}
-
-const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i < oat_files_.size(); i++) {
- const OatFile* oat_file = oat_files_[i];
- DCHECK(oat_file != nullptr);
- if (oat_file->GetLocation() == oat_location) {
- return oat_file;
- }
- }
- return nullptr;
-}
-
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
gc::space::ImageSpace* space)
@@ -1169,16 +833,17 @@ void ClassLinker::InitFromImage() {
CHECK(space != nullptr);
image_pointer_size_ = space->GetImageHeader().GetPointerSize();
dex_cache_image_class_lookup_required_ = true;
- OatFile& oat_file = GetImageOatFile(space);
- CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
- CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
- const char* image_file_location = oat_file.GetOatHeader().
+ const OatFile* oat_file = runtime->GetOatFileManager().RegisterImageOatFile(space);
+ DCHECK(oat_file != nullptr);
+ CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
+ CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
+ const char* image_file_location = oat_file->GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
CHECK(image_file_location == nullptr || *image_file_location == 0);
- quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline();
- quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
- quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
- quick_to_interpreter_bridge_trampoline_ = oat_file.GetOatHeader().GetQuickToInterpreterBridge();
+ quick_resolution_trampoline_ = oat_file->GetOatHeader().GetQuickResolutionTrampoline();
+ quick_imt_conflict_trampoline_ = oat_file->GetOatHeader().GetQuickImtConflictTrampoline();
+ quick_generic_jni_trampoline_ = oat_file->GetOatHeader().GetQuickGenericJniTrampoline();
+ quick_to_interpreter_bridge_trampoline_ = oat_file->GetOatHeader().GetQuickToInterpreterBridge();
StackHandleScope<2> hs(self);
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches(
@@ -1200,20 +865,20 @@ void ClassLinker::InitFromImage() {
java_lang_Object->GetObjectSize(),
VoidFunctor()));
- CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
+ CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
StackHandleScope<1> hs2(self);
Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
- const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
- nullptr);
- CHECK(oat_dex_file != nullptr) << oat_file.GetLocation() << " " << dex_file_location;
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location.c_str(),
+ nullptr);
+ CHECK(oat_dex_file != nullptr) << oat_file->GetLocation() << " " << dex_file_location;
std::string error_msg;
std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
+ if (dex_file == nullptr) {
LOG(FATAL) << "Failed to open dex file " << dex_file_location
- << " from within oat file " << oat_file.GetLocation()
+ << " from within oat file " << oat_file->GetLocation()
<< " error '" << error_msg << "'";
UNREACHABLE();
}
@@ -1361,9 +1026,9 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class_roots_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
VisitClassRoots(visitor, flags);
array_iftable_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- for (GcRoot<mirror::Class>& root : find_array_class_cache_) {
- root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- }
+ // Instead of visiting the find_array_class_cache_, drop it so that it doesn't prevent class
+ // unloading if we are marking roots.
+ DropFindArrayClassCache();
}
class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
@@ -1508,7 +1173,6 @@ ClassLinker::~ClassLinker() {
mirror::IntArray::ResetArrayClass();
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
- STLDeleteElements(&oat_files_);
Thread* const self = Thread::Current();
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (const ClassLoaderData& data : class_loaders_) {
@@ -1525,7 +1189,9 @@ mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length
static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
-mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc) {
StackHandleScope<6> hs(self);
auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
@@ -1540,22 +1206,20 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
}
DexCacheArraysLayout layout(image_pointer_size_, &dex_file);
uint8_t* raw_arrays = nullptr;
- if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
+ if (dex_file.GetOatDexFile() != nullptr &&
+ dex_file.GetOatDexFile()->GetDexCacheArrays() != nullptr) {
+ raw_arrays = const_cast<uint8_t*>(dex_file.GetOatDexFile()->GetDexCacheArrays());
+ } else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
// NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
- if (sizeof(void*) == 8u && image_pointer_size_ == 4u) {
- // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
- // in the low 4GiB address space so that we can store pointers in 32-bit fields.
- // This is conveniently provided by the linear allocator.
- raw_arrays = reinterpret_cast<uint8_t*>(
- Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size())); // Zero-initialized.
- } else {
- raw_arrays = reinterpret_cast<uint8_t*>(calloc(layout.Size(), 1u)); // Zero-initialized.
- if (raw_arrays == nullptr) {
- return nullptr;
- }
- }
+ // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
+ // in the low 4GiB address space so that we can store pointers in 32-bit fields.
+ // This is conveniently provided by the linear allocator.
+ raw_arrays = reinterpret_cast<uint8_t*>(
+ (sizeof(void*) == 8u && image_pointer_size_ == 4u)
+ ? Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size()) // Zero-initialized.
+ : linear_alloc->Alloc(self, layout.Size())); // Zero-initialized.
}
GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
@@ -1768,13 +1432,18 @@ bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable&
break;
}
int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
+ // First element is the oat file.
+ for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
long_array->GetWithoutChecks(j)));
const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
- mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
- *cp_dex_file, *dex_class_def);
+ mirror::Class* klass = DefineClass(self,
+ descriptor,
+ hash,
+ class_loader,
+ *cp_dex_file,
+ *dex_class_def);
if (klass == nullptr) {
CHECK(self->IsExceptionPending()) << descriptor;
self->ClearException();
@@ -1921,7 +1590,9 @@ mirror::Class* ClassLinker::DefineClass(Thread* self,
self->AssertPendingOOMException();
return nullptr;
}
- mirror::DexCache* dex_cache = RegisterDexFile(dex_file);
+ mirror::DexCache* dex_cache = RegisterDexFile(
+ dex_file,
+ GetOrCreateAllocatorForClassLoader(class_loader.Get()));
if (dex_cache == nullptr) {
self->AssertPendingOOMException();
return nullptr;
@@ -2424,6 +2095,19 @@ LinearAlloc* ClassLinker::GetAllocatorForClassLoader(mirror::ClassLoader* class_
return allocator;
}
+LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) {
+ if (class_loader == nullptr) {
+ return Runtime::Current()->GetLinearAlloc();
+ }
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ LinearAlloc* allocator = class_loader->GetAllocator();
+ if (allocator == nullptr) {
+ allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loader->SetAllocator(allocator);
+ }
+ return allocator;
+}
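
GetOrCreateAllocatorForClassLoader is a get-or-create under a writer lock: both the null check and the publishing store happen while the lock is held, so concurrent callers observe a single allocator. A condensed sketch of the pattern with std::mutex standing in for the runtime lock (all names hypothetical):

#include <mutex>

struct LinearAllocStub {};  // Stand-in for the runtime's LinearAlloc.

struct LoaderStub {
  LinearAllocStub* allocator = nullptr;
};

std::mutex g_loader_lock;  // Stand-in for classlinker_classes_lock_.

// Lazily create the loader's allocator. Check and store are both under the
// lock, so two racing callers cannot each create an allocator.
LinearAllocStub* GetOrCreateAllocator(LoaderStub* loader) {
  std::lock_guard<std::mutex> lock(g_loader_lock);
  if (loader->allocator == nullptr) {
    loader->allocator = new LinearAllocStub();  // Deliberately leaked in this sketch.
  }
  return loader->allocator;
}

int main() {
  LoaderStub loader;
  LinearAllocStub* a = GetOrCreateAllocator(&loader);
  LinearAllocStub* b = GetOrCreateAllocator(&loader);
  return (a == b) ? 0 : 1;  // Same instance both times.
}
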
+
void ClassLinker::LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
@@ -2582,7 +2266,10 @@ void ClassLinker::LoadMethod(Thread* self,
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(
+ self,
+ dex_file,
+ Runtime::Current()->GetLinearAlloc())));
CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
<< dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
@@ -2618,7 +2305,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
dex_cache->SetDexFile(&dex_file);
}
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
@@ -2631,7 +2318,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file, linear_alloc)));
WriterMutexLock mu(self, dex_lock_);
mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
@@ -3428,6 +3115,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
std::string descriptor(GetDescriptorForProxy(klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ // Needs to be before we insert the class so that the allocator field is set.
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
@@ -3435,9 +3125,6 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
CHECK(existing == nullptr);
- // Needs to be after we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
-
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
@@ -3676,6 +3363,18 @@ bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_stati
return false;
}
}
+ // If we are a class we need to initialize all interfaces with default methods when we are
+ // initialized. Check all of them.
+ if (!klass->IsInterface()) {
+ size_t num_interfaces = klass->GetIfTableCount();
+ for (size_t i = 0; i < num_interfaces; i++) {
+ mirror::Class* iface = klass->GetIfTable()->GetInterface(i);
+ if (iface->HasDefaultMethods() &&
+ !CanWeInitializeClass(iface, can_init_statics, can_init_parents)) {
+ return false;
+ }
+ }
+ }
}
if (klass->IsInterface() || !klass->HasSuperClass()) {
return true;
@@ -3802,6 +3501,38 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
}
}
+ if (!klass->IsInterface()) {
+ // Initialize interfaces with default methods for the JLS.
+ size_t num_direct_interfaces = klass->NumDirectInterfaces();
+ // Only setup the (expensive) handle scope if we actually need to.
+ if (UNLIKELY(num_direct_interfaces > 0)) {
+ StackHandleScope<1> hs_iface(self);
+ MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr));
+ for (size_t i = 0; i < num_direct_interfaces; i++) {
+ handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass, i));
+ CHECK(handle_scope_iface.Get() != nullptr);
+ CHECK(handle_scope_iface->IsInterface());
+ if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
+ // We have already done this for this interface. Skip it.
+ continue;
+ }
+ // We cannot simply call InitializeClass directly because we need to ensure that ALL
+ // interfaces with default methods are initialized. Initializing an interface does not
+ // initialize its super-interfaces, so we must walk them explicitly.
+ bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
+ handle_scope_iface,
+ can_init_statics,
+ can_init_parents);
+ if (!iface_initialized) {
+ ObjectLock<mirror::Class> lock(self, klass);
+ // Initialization failed because one of our interfaces with default methods is erroneous.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ return false;
+ }
+ }
+ }
+ }
+
const size_t num_static_fields = klass->NumStaticFields();
if (num_static_fields > 0) {
const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
@@ -3891,6 +3622,52 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
return success;
}
+// We recursively run down the tree of interfaces. We need to do this in the order they are declared
+// and perform the initialization only on those interfaces that contain default methods.
+bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
+ Handle<mirror::Class> iface,
+ bool can_init_statics,
+ bool can_init_parents) {
+ CHECK(iface->IsInterface());
+ size_t num_direct_ifaces = iface->NumDirectInterfaces();
+ // Only create the (expensive) handle scope if we need it.
+ if (UNLIKELY(num_direct_ifaces > 0)) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
+ // First we initialize all of iface's super-interfaces recursively.
+ for (size_t i = 0; i < num_direct_ifaces; i++) {
+ mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ if (!super_iface->HasBeenRecursivelyInitialized()) {
+ // Recursive step
+ handle_super_iface.Assign(super_iface);
+ if (!InitializeDefaultInterfaceRecursive(self,
+ handle_super_iface,
+ can_init_statics,
+ can_init_parents)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ bool result = true;
+ // Then we initialize 'iface' if it has default methods. We do not need to (and in fact must not)
+ // initialize if we don't have default methods.
+ if (iface->HasDefaultMethods()) {
+ result = EnsureInitialized(self, iface, can_init_statics, can_init_parents);
+ }
+
+ // Mark that this interface has undergone recursive default interface initialization so we know we
+ // can skip it on any later class initializations. We do this even if we are not a default
+ // interface since we can still avoid the traversal. This is purely a performance optimization.
+ if (result) {
+ // TODO This should be done in a better way
+ ObjectLock<mirror::Class> lock(self, iface);
+ iface->SetRecursivelyInitialized();
+ }
+ return result;
+}
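
The recursion above is a depth-first walk: super-interfaces are initialized before the interface itself, EnsureInitialized runs only for interfaces that declare default methods, and a memo flag prunes re-visits of shared super-interfaces. A simplified sketch over a plain graph (Iface and its fields are assumptions for illustration, not runtime types):

#include <vector>

struct Iface {
  std::vector<Iface*> supers;
  bool has_defaults = false;
  bool recursively_initialized = false;
  bool initialized = false;
};

// Initialize all super-interfaces first, then the interface itself if it
// declares default methods. The memo flag makes diamonds cheap.
bool InitializeDefaultIfaceRecursive(Iface* iface) {
  for (Iface* super : iface->supers) {
    if (!super->recursively_initialized && !InitializeDefaultIfaceRecursive(super)) {
      return false;
    }
  }
  if (iface->has_defaults) {
    iface->initialized = true;  // Stand-in for EnsureInitialized.
  }
  iface->recursively_initialized = true;
  return true;
}

int main() {
  Iface base;
  base.has_defaults = true;
  Iface mid;
  mid.supers.push_back(&base);
  Iface leaf;
  leaf.supers.push_back(&mid);
  leaf.supers.push_back(&base);  // Diamond: base is still visited only once.
  return (InitializeDefaultIfaceRecursive(&leaf) && base.initialized) ? 0 : 1;
}
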
+
bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock)
@@ -4193,13 +3970,13 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
ClassLoaderData data;
data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
data.class_table = class_table;
- data.allocator = Runtime::Current()->CreateLinearAlloc();
- class_loaders_.push_back(data);
// Don't already have a class table, add it to the class loader.
CHECK(class_loader->GetClassTable() == nullptr);
- CHECK(class_loader->GetAllocator() == nullptr);
class_loader->SetClassTable(data.class_table);
- class_loader->SetAllocator(data.allocator);
+ // Should have been set when we registered the dex file.
+ data.allocator = class_loader->GetAllocator();
+ CHECK(data.allocator != nullptr);
+ class_loaders_.push_back(data);
}
return class_table;
}
@@ -4623,20 +4400,16 @@ bool ClassLinker::LinkMethods(Thread* self,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
ArtMethod** out_imt) {
self->AllowThreadSuspension();
- if (klass->IsInterface()) {
- // No vtable.
- size_t count = klass->NumVirtualMethods();
- if (!IsUint<16>(count)) {
- ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
- return false;
- }
- for (size_t i = 0; i < count; ++i) {
- klass->GetVirtualMethodDuringLinking(i, image_pointer_size_)->SetMethodIndex(i);
- }
- } else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first.
- return false;
- }
- return LinkInterfaceMethods(self, klass, interfaces, out_imt); // Link interface method last.
+ // A map from vtable indexes to the methods those entries need to be updated to point to. Used
+ // because we need default methods to be in the virtuals array of each class but we don't set
+ // that up until LinkInterfaceMethods.
+ std::unordered_map<size_t, ArtMethod*> default_translations;
+ // Link virtual methods then interface methods.
+ // We set up the interface lookup table first because we need it to determine if we need to update
+ // any vtable entries with new default method implementations.
+ return SetupInterfaceLookupTable(self, klass, interfaces)
+ && LinkVirtualMethods(self, klass, /*out*/ &default_translations)
+ && LinkInterfaceMethods(self, klass, default_translations, out_imt);
}
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -4760,9 +4533,36 @@ class LinkVirtualHashTable {
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
-bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
+bool ClassLinker::LinkVirtualMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations) {
const size_t num_virtual_methods = klass->NumVirtualMethods();
- if (klass->HasSuperClass()) {
+ if (klass->IsInterface()) {
+ // No vtable.
+ if (!IsUint<16>(num_virtual_methods)) {
+ ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zu", num_virtual_methods);
+ return false;
+ }
+ bool has_defaults = false;
+ // TODO May need to replace this with real VTable for invoke_super
+ // Assign each method an IMT index and set the default flag.
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ ArtMethod* m = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
+ m->SetMethodIndex(i);
+ if (!m->IsAbstract()) {
+ m->SetAccessFlags(m->GetAccessFlags() | kAccDefault);
+ has_defaults = true;
+ }
+ }
+ // Mark that we have default methods so that we won't need to scan the virtual_methods_ array
+ // during initialization. This is a performance optimization. We could simply traverse the
+ // virtual_methods_ array again during initialization.
+ if (has_defaults) {
+ klass->SetHasDefaultMethods();
+ }
+ return true;
+ } else if (klass->HasSuperClass()) {
const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
const size_t max_count = num_virtual_methods + super_vtable_length;
StackHandleScope<2> hs(self);
@@ -4778,14 +4578,22 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
vtable->SetElementPtrSize(
i, super_class->GetEmbeddedVTableEntry(i, image_pointer_size_), image_pointer_size_);
}
- if (num_virtual_methods == 0) {
+ // We might need to change the vtable if we have new virtual methods or new interfaces (since
+ // those might give us new default methods). If there are neither, we can skip the rest, since
+ // the class cannot override any of the super-class's methods. This check is required for
+ // correctness, since without it we might not update overridden default method vtable entries
+ // correctly.
+ if (num_virtual_methods == 0 && super_class->GetIfTableCount() == klass->GetIfTableCount()) {
klass->SetVTable(vtable.Get());
return true;
}
} else {
+ DCHECK(super_class->IsAbstract() && !super_class->IsArrayClass());
auto* super_vtable = super_class->GetVTable();
CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get());
- if (num_virtual_methods == 0) {
+ // We might need to change vtable if we have new virtual methods or new interfaces (since that
+ // might give us new default methods). See comment above.
+ if (num_virtual_methods == 0 && super_class->GetIfTableCount() == klass->GetIfTableCount()) {
klass->SetVTable(super_vtable);
return true;
}
@@ -4806,7 +4614,9 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
// the need for the initial vtable which we later shrink back down).
// 3. Add non overridden methods to the end of the vtable.
static constexpr size_t kMaxStackHash = 250;
- const size_t hash_table_size = num_virtual_methods * 3;
+ // + 1 so that even if we only have new default methods we will still be able to use this hash
+ // table (i.e. it will never have 0 size).
+ const size_t hash_table_size = num_virtual_methods * 3 + 1;
uint32_t* hash_table_ptr;
std::unique_ptr<uint32_t[]> hash_heap_storage;
if (hash_table_size <= kMaxStackHash) {
@@ -4823,10 +4633,10 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
i, image_pointer_size_)->GetDeclaringClass() != nullptr);
hash_table.Add(i);
}
- // Loop through each super vtable method and see if they are overriden by a method we added to
+ // Loop through each super vtable method and see if they are overridden by a method we added to
// the hash table.
for (size_t j = 0; j < super_vtable_length; ++j) {
- // Search the hash table to see if we are overidden by any method.
+ // Search the hash table to see if we are overridden by any method.
ArtMethod* super_method = vtable->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
MethodNameAndSignatureComparator super_method_name_comparator(
super_method->GetInterfaceMethodIfProxy(image_pointer_size_));
@@ -4849,10 +4659,51 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
<< " would have incorrectly overridden the package-private method in "
<< PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
}
+ } else if (super_method->IsDefault()) {
+ // We didn't directly override this method but we might through default methods...
+ // Check for default method update.
+ ArtMethod* default_method = nullptr;
+ std::string icce_message;
+ if (!FindDefaultMethodImplementation(self,
+ super_method,
+ klass,
+ /*out*/&default_method,
+ /*out*/&icce_message)) {
+ // An error occurred while finding default methods.
+ // TODO This should actually be thrown when we attempt to invoke this method.
+ ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
+ return false;
+ }
+ // This should always work because we inherit superclass interfaces. We should either get
+ // 1) An IncompatibleClassChangeError because of conflicting default method
+ // implementations.
+ // 2) The same default method implementation as the superclass.
+ // 3) A default method that overrides the superclass's.
+ // Therefore this check should never fail.
+ CHECK(default_method != nullptr);
+ if (UNLIKELY(default_method->GetDeclaringClass() != super_method->GetDeclaringClass())) {
+ // TODO: Refactor this to add default methods to the virtuals here rather than
+ // in LinkInterfaceMethods.
+ // The problem is that default methods might override previously present default-method
+ // or miranda-method vtable entries from the superclass. Unfortunately we need these to
+ // be entries in this class's virtuals, but we do not place them there until
+ // LinkInterfaceMethods, so we pass this map around to let it know which vtable
+ // entries need to be updated.
+ // Make a note that vtable entry j must be updated, store what it needs to be updated to.
+ // We will allocate a virtual method slot in LinkInterfaceMethods and fix it up then.
+ default_translations->insert({j, default_method});
+ VLOG(class_linker) << "Method " << PrettyMethod(super_method) << " overridden by default "
+ << PrettyMethod(default_method) << " in " << PrettyClass(klass.Get());
+ } else {
+ // They are the same method/no override
+ // Cannot do direct comparison because we had to copy the ArtMethod object into the
+ // superclass's vtable.
+ continue;
+ }
}
}
- // Add the non overridden methods at the end.
size_t actual_count = super_vtable_length;
+ // Add the non-overridden methods at the end.
for (size_t i = 0; i < num_virtual_methods; ++i) {
ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
size_t method_idx = local_method->GetMethodIndexDuringLinking();
@@ -4900,20 +4751,223 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
return true;
}
-bool ClassLinker::LinkInterfaceMethods(Thread* self,
- Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt) {
- StackHandleScope<3> hs(self);
- Runtime* const runtime = Runtime::Current();
- const bool has_superclass = klass->HasSuperClass();
- const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+// Find the default method implementation for 'target_method' in 'klass'. Stores it into
+// out_default_method and returns true on success. If no default method was found, stores nullptr
+// into out_default_method and still returns true. If an error occurs (such as a default method
+// conflict), fills icce_message with an appropriate message for an IncompatibleClassChangeError,
+// which should then be thrown by the caller, and returns false.
+bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method,
+ /*out*/std::string* icce_message) const {
+ DCHECK(self != nullptr);
+ DCHECK(target_method != nullptr);
+ DCHECK(out_default_method != nullptr);
+ DCHECK(icce_message != nullptr);
+
+ *out_default_method = nullptr;
+ mirror::Class* chosen_iface = nullptr;
+
+ // We organize the interface table so that, for each interface I, any subinterface J follows it
+ // in the table. This lets us walk the table backwards when searching for default methods. The
+ // first one we encounter is the best candidate since it is the most specific. Once we have found
+ // it we keep track of it and then continue checking all other interfaces, since we need to throw
+ // an error if we encounter conflicting default method implementations (where neither declaring
+ // interface is a subtype of the other).
+ //
+ // The order of unrelated interfaces does not matter and is not defined.
+ size_t iftable_count = klass->GetIfTableCount();
+ if (iftable_count == 0) {
+ // No interfaces. We have already reset out to null so just return true.
+ return true;
+ }
+
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
+ MethodNameAndSignatureComparator target_name_comparator(
+ target_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+ // Iterate over the klass's iftable in reverse. The loop breaks out at the bottom rather than
+ // testing k >= 0 in the condition, because size_t is unsigned and would wrap around.
+ for (size_t k = iftable_count - 1; /* break if k == 0 at end */; --k) {
+ DCHECK_LT(k, iftable->Count());
+ mirror::Class* iface = iftable->GetInterface(k);
+ size_t num_instance_methods = iface->NumVirtualMethods();
+ // Iterate through every method on this interface. The order does not matter so we go forwards.
+ for (size_t m = 0; m < num_instance_methods; m++) {
+ ArtMethod* current_method = iface->GetVirtualMethodUnchecked(m, image_pointer_size_);
+ // Skip abstract methods and methods whose name and signature do not match.
+ if (current_method->IsAbstract() ||
+ !target_name_comparator.HasSameNameAndSignature(
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) {
+ continue;
+ }
+ // The verifier should have caught the non-public method.
+ DCHECK(current_method->IsPublic()) << "Interface method is not public!";
+ if (UNLIKELY(chosen_iface != nullptr)) {
+ // We have multiple default impls of the same method. We need to check they do not
+ // conflict and throw an error if they do. Conflicting means that the current iface is not
+ // masked by the chosen interface.
+ if (!iface->IsAssignableFrom(chosen_iface)) {
+ *icce_message = StringPrintf("Conflicting default method implementations: '%s' and '%s'",
+ PrettyMethod(current_method).c_str(),
+ PrettyMethod(*out_default_method).c_str());
+ return false;
+ } else {
+ break; // Continue checking at the next interface.
+ }
+ } else {
+ *out_default_method = current_method;
+ chosen_iface = iface;
+ // We should now finish traversing the graph to find if we have default methods that
+ // conflict.
+ break;
+ }
+ }
+ if (k == 0) {
+ break;
+ }
+ }
+ return true;
+}
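
The reverse iftable walk above relies on the break-at-zero idiom: with size_t, a conventional k >= 0 loop condition is always true and --k wraps around at zero. A tiny self-contained illustration of the safe pattern (it assumes the container is non-empty, just as iftable_count != 0 is guaranteed above):

#include <iostream>
#include <vector>

int main() {
  std::vector<int> v = {1, 2, 3};
  // Broken variant: for (size_t k = v.size() - 1; k >= 0; --k) never
  // terminates, because k >= 0 holds for every size_t value.
  for (size_t k = v.size() - 1; /* break below when k == 0 */; --k) {
    std::cout << v[k] << "\n";  // Prints 3 2 1.
    if (k == 0) {
      break;
    }
  }
  return 0;
}
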
+
+// Sets imt_ref appropriately for LinkInterfaceMethods.
+// If there is no method in the imt location of imt_ref it will store the given method there.
+// Otherwise it will set the conflict method which will figure out which method to use during
+// runtime.
+static void SetIMTRef(ArtMethod* unimplemented_method,
+ ArtMethod* conflict_method,
+ size_t image_pointer_size,
+ ArtMethod* current_method,
+ /*out*/ArtMethod** imt_ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Place method in imt if entry is empty, place conflict otherwise.
+ if (*imt_ref == unimplemented_method) {
+ *imt_ref = current_method;
+ } else if (*imt_ref != conflict_method) {
+ // If we are not a conflict and we have the same signature and name as the imt
+ // entry, it must be that we overwrote a superclass vtable entry.
+ MethodNameAndSignatureComparator imt_comparator(
+ (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size));
+ if (imt_comparator.HasSameNameAndSignature(
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size))) {
+ *imt_ref = current_method;
+ } else {
+ *imt_ref = conflict_method;
+ }
+ }
+}
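
SetIMTRef gives each IMT slot three states: an empty slot takes the candidate, a slot whose method shares the candidate's name and signature is overwritten (the subclass overrode a superclass entry), and anything else collapses to the conflict method. A small sketch of that state machine on a toy Method type (all names hypothetical):

#include <string>

struct Method {
  std::string name_and_signature;
};

Method kUnimplemented{"<unimplemented>"};
Method kConflict{"<conflict>"};

// Three-state IMT slot update: fill an empty slot, replace an overridden
// entry with the same name and signature, otherwise mark a conflict.
void SetImtSlot(Method* candidate, Method** slot) {
  if (*slot == &kUnimplemented) {
    *slot = candidate;
  } else if (*slot != &kConflict) {
    *slot = ((*slot)->name_and_signature == candidate->name_and_signature)
                ? candidate
                : &kConflict;
  }
}

int main() {
  Method foo{"foo()V"};
  Method bar{"bar()V"};
  Method* slot = &kUnimplemented;
  SetImtSlot(&foo, &slot);  // Empty slot -> foo.
  SetImtSlot(&bar, &slot);  // Different signature -> conflict.
  return (slot == &kConflict) ? 0 : 1;
}
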
+
+// Simple helper function that checks that no subtypes of 'val' are contained within the 'classes'
+// set.
+static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes,
+ mirror::Class* val)
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(val != nullptr);
+ for (auto c : classes) {
+ if (val->IsAssignableFrom(&*c)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Fills in and flattens the interface inheritance hierarchy.
+//
+// By the end of this function all interfaces in the transitive closure of to_process are added to
+// the iftable and every interface precedes all of its sub-interfaces in this list.
+//
+// all I, J: Interface | I <: J implies J precedes I
+//
+// (note A <: B means that A is a subtype of B)
+//
+// This returns the total number of items in the iftable. The iftable might be resized down after
+// this call.
+//
+// We order this backwards so that we do not need to reorder superclass interfaces when new
+// interfaces are added in subclass's interface tables.
+//
+// Upon entry into this function iftable is a copy of the superclass's iftable with the first
+// super_ifcount entries filled in with the transitive closure of the interfaces of the superclass.
+// The other entries are uninitialized. We will fill in the remaining entries in this function. The
+// iftable must be large enough to hold all interfaces without changing its size.
+static size_t FillIfTable(mirror::IfTable* iftable,
+ size_t super_ifcount,
+ std::vector<mirror::Class*> to_process)
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // This is the set of all classes already in the iftable. Used to make checking if a class has
+ // already been added quicker.
+ std::unordered_set<mirror::Class*> classes_in_iftable;
+ // The first super_ifcount elements are from the superclass. We note that they are already added.
+ for (size_t i = 0; i < super_ifcount; i++) {
+ mirror::Class* iface = iftable->GetInterface(i);
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, iface)) << "Bad ordering.";
+ classes_in_iftable.insert(iface);
+ }
+ size_t filled_ifcount = super_ifcount;
+ for (mirror::Class* interface : to_process) {
+ // Let us call the first filled_ifcount elements of iftable the current-iface-list.
+ // At this point in the loop current-iface-list has the invariant that:
+ // for every pair of interfaces I,J within it:
+ // if index_of(I) < index_of(J) then I is not a subtype of J
+
+ // If we have already seen this element then all of its super-interfaces must already be in the
+ // current-iface-list so we can skip adding it.
+ if (!ContainsElement(classes_in_iftable, interface)) {
+ // We haven't seen this interface so add all of its super-interfaces onto the
+ // current-iface-list, skipping those already on it.
+ int32_t ifcount = interface->GetIfTableCount();
+ for (int32_t j = 0; j < ifcount; j++) {
+ mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
+ if (!ContainsElement(classes_in_iftable, super_interface)) {
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, super_interface)) << "Bad ordering.";
+ classes_in_iftable.insert(super_interface);
+ iftable->SetInterface(filled_ifcount, super_interface);
+ filled_ifcount++;
+ }
+ }
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, interface)) << "Bad ordering";
+ // Place this interface onto the current-iface-list after all of its super-interfaces.
+ classes_in_iftable.insert(interface);
+ iftable->SetInterface(filled_ifcount, interface);
+ filled_ifcount++;
+ } else if (kIsDebugBuild) {
+ // Check all super-interfaces are already in the list.
+ int32_t ifcount = interface->GetIfTableCount();
+ for (int32_t j = 0; j < ifcount; j++) {
+ mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
+ DCHECK(ContainsElement(classes_in_iftable, super_interface))
+ << "Iftable does not contain " << PrettyClass(super_interface)
+ << ", a superinterface of " << PrettyClass(interface);
+ }
+ }
+ }
+ if (kIsDebugBuild) {
+ // Check that the iftable is ordered correctly.
+ for (size_t i = 0; i < filled_ifcount; i++) {
+ mirror::Class* if_a = iftable->GetInterface(i);
+ for (size_t j = i + 1; j < filled_ifcount; j++) {
+ mirror::Class* if_b = iftable->GetInterface(j);
+ // !(if_a <: if_b)
+ CHECK(!if_b->IsAssignableFrom(if_a))
+ << "Bad interface order: " << PrettyClass(if_a) << " (index " << i << ") extends "
+ << PrettyClass(if_b) << " (index " << j << ") and so should be after it in the "
+ << "interface list.";
+ }
+ }
+ }
+ return filled_ifcount;
+}
+
+bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+ StackHandleScope<1> hs(self);
+ const size_t super_ifcount =
+ klass->HasSuperClass() ? klass->GetSuperClass()->GetIfTableCount() : 0U;
const bool have_interfaces = interfaces.Get() != nullptr;
- const size_t num_interfaces = have_interfaces
- ? interfaces->GetLength()
- : klass->NumDirectInterfaces();
- const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
- const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t num_interfaces =
+ have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
if (num_interfaces == 0) {
if (super_ifcount == 0) {
// Class implements no interfaces.
@@ -4937,6 +4991,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
}
size_t ifcount = super_ifcount + num_interfaces;
+ // Check that every class being implemented is an interface.
for (size_t i = 0; i < num_interfaces; i++) {
mirror::Class* interface = have_interfaces
? interfaces->GetWithoutChecks(i)
@@ -4952,11 +5007,13 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
ifcount += interface->GetIfTableCount();
}
+ // Create the interface function table.
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == nullptr)) {
self->AssertPendingOOMException();
return false;
}
+ // Fill in table with superclass's iftable.
if (super_ifcount != 0) {
mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
for (size_t i = 0; i < super_ifcount; i++) {
@@ -4964,56 +5021,59 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
iftable->SetInterface(i, super_interface);
}
}
+
+ // Note that AllowThreadSuspension is to thread suspension as pthread_testcancel is to pthread
+ // cancellation. That is, it will suspend if the thread has a pending suspend request but otherwise
+ // doesn't really do anything.
self->AllowThreadSuspension();
- // Flatten the interface inheritance hierarchy.
- size_t idx = super_ifcount;
- for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
- mirror::Class::GetDirectInterface(self, klass, i);
- // Check if interface is already in iftable
- bool duplicate = false;
- for (size_t j = 0; j < idx; j++) {
- mirror::Class* existing_interface = iftable->GetInterface(j);
- if (existing_interface == interface) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate) {
- // Add this non-duplicate interface.
- iftable->SetInterface(idx++, interface);
- // Add this interface's non-duplicate super-interfaces.
- for (int32_t j = 0; j < interface->GetIfTableCount(); j++) {
- mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
- bool super_duplicate = false;
- for (size_t k = 0; k < idx; k++) {
- mirror::Class* existing_interface = iftable->GetInterface(k);
- if (existing_interface == super_interface) {
- super_duplicate = true;
- break;
- }
- }
- if (!super_duplicate) {
- iftable->SetInterface(idx++, super_interface);
- }
- }
+
+ size_t new_ifcount;
+ {
+ ScopedAssertNoThreadSuspension nts(self, "Copying mirror::Class*'s for FillIfTable");
+ std::vector<mirror::Class*> to_add;
+ for (size_t i = 0; i < num_interfaces; i++) {
+ mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+ mirror::Class::GetDirectInterface(self, klass, i);
+ to_add.push_back(interface);
}
+
+ new_ifcount = FillIfTable(iftable.Get(), super_ifcount, std::move(to_add));
}
+
self->AllowThreadSuspension();
+
// Shrink iftable in case duplicates were found
- if (idx < ifcount) {
+ if (new_ifcount < ifcount) {
DCHECK_NE(num_interfaces, 0U);
iftable.Assign(down_cast<mirror::IfTable*>(
- iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
+ iftable->CopyOf(self, new_ifcount * mirror::IfTable::kMax)));
if (UNLIKELY(iftable.Get() == nullptr)) {
self->AssertPendingOOMException();
return false;
}
- ifcount = idx;
+ ifcount = new_ifcount;
} else {
- DCHECK_EQ(idx, ifcount);
+ DCHECK_EQ(new_ifcount, ifcount);
}
klass->SetIfTable(iftable.Get());
+ return true;
+}
+
+bool ClassLinker::LinkInterfaceMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ const std::unordered_map<size_t, ArtMethod*>& default_translations,
+ ArtMethod** out_imt) {
+ StackHandleScope<3> hs(self);
+ Runtime* const runtime = Runtime::Current();
+ const bool has_superclass = klass->HasSuperClass();
+ const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t ifcount = klass->GetIfTableCount();
+
+ MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
+
// If we're an interface, we don't need the vtable pointers, so we're done.
if (klass->IsInterface()) {
return true;
@@ -5026,6 +5086,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
ArenaStack stack(runtime->GetLinearAlloc()->GetArenaPool());
ScopedArenaAllocator allocator(&stack);
ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter());
+ ScopedArenaVector<ArtMethod*> default_methods(allocator.Adapter());
MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
@@ -5055,7 +5116,9 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
for (size_t j = 0; j < num_virtuals; ++j) {
auto method = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
DCHECK(method != nullptr) << PrettyClass(super_class);
- if (method->IsMiranda()) {
+ // Miranda methods cannot be used to implement an interface method, and default methods
+ // should be skipped in case this class overrides them.
+ if (method->IsDefault() || method->IsMiranda()) {
continue;
}
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
@@ -5076,6 +5139,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
const bool is_super = i < super_ifcount;
+ // This is an interface implemented by a super-class. Therefore we can just copy the method
+ // array from the superclass.
const bool super_interface = is_super && extend_super_iftable;
mirror::PointerArray* method_array;
if (super_interface) {
@@ -5119,16 +5184,13 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
input_vtable_array = vtable;
input_array_length = input_vtable_array->GetLength();
}
- if (input_array_length == 0) {
- // If the added virtual methods is empty, do nothing.
- DCHECK(super_interface);
- continue;
- }
+ // For each method in interface
for (size_t j = 0; j < num_methods; ++j) {
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- int32_t k;
+ uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
// subclass over the superclass, which just requires walking
@@ -5137,7 +5199,12 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
// it -- otherwise it would use the same vtable slot. In .dex files
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
- for (k = input_array_length - 1; k >= 0; --k) {
+ //
+ // To find defaults we need to do the same but also go over interfaces.
+ bool found_impl = false;
+ ArtMethod* default_impl = nullptr;
+ bool found_default_impl = false;
+ for (int32_t k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
&input_virtual_methods->At(k, method_size, method_alignment) :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
@@ -5153,25 +5220,69 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str());
return false;
+ } else if (vtable_method->IsDefault()) {
+ // We might have a newer, better default method for this, so we just skip it. If we
+ // are still using this one we will select it again when scanning for default methods.
+ // To obviate the need to copy the method again we note that we already found
+ // a default here.
+ // TODO This should be much cleaner.
+ found_default_impl = true;
+ default_impl = vtable_method;
+ break;
+ } else {
+ found_impl = true;
}
method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
// Place method in imt if entry is empty, place conflict otherwise.
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- auto** imt_ref = &out_imt[imt_index];
- if (*imt_ref == unimplemented_method) {
- *imt_ref = vtable_method;
- } else if (*imt_ref != conflict_method) {
- // If we are not a conflict and we have the same signature and name as the imt entry,
- // it must be that we overwrote a superclass vtable entry.
- MethodNameAndSignatureComparator imt_comparator(
- (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_));
- *imt_ref = imt_comparator.HasSameNameAndSignature(vtable_method_for_name_comparison) ?
- vtable_method : conflict_method;
- }
+ SetIMTRef(unimplemented_method,
+ conflict_method,
+ image_pointer_size_,
+ vtable_method,
+ /*out*/imt_ptr);
break;
}
}
- if (k < 0 && !super_interface) {
+ // We should only search for default implementations when the class does not implement the
+ // method directly and either (1) the interface is newly implemented on this class and not
+ // on any of its superclasses, (2) the superclass's implementation is a default method, or
+ // (3) the superclass does not have an implementation.
+ if (!found_impl && (!super_interface ||
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_)
+ ->IsOverridableByDefaultMethod())) {
+ ArtMethod* current_method = nullptr;
+ std::string icce_message;
+ if (!FindDefaultMethodImplementation(self,
+ interface_method,
+ klass,
+ /*out*/&current_method,
+ /*out*/&icce_message)) {
+ // There was a conflict with default method implementations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ // TODO This should actually be thrown when we attempt to invoke this method.
+ ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
+ return false;
+ } else if (current_method != nullptr) {
+ if (found_default_impl &&
+ current_method->GetDeclaringClass() == default_impl->GetDeclaringClass()) {
+ // We found a default method but it was the same one we already have from our
+ // superclass. Don't bother adding it to our vtable again.
+ current_method = default_impl;
+ } else {
+ // We found a default method implementation and there were no conflicts.
+ // Save the default method. We need to add it to the vtable.
+ default_methods.push_back(current_method);
+ }
+ method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
+ SetIMTRef(unimplemented_method,
+ conflict_method,
+ image_pointer_size_,
+ current_method,
+ /*out*/imt_ptr);
+ found_impl = true;
+ }
+ }
+ if (!found_impl && !super_interface) {
+ // No implementation was found in this class or any of its superclasses, so add a miranda method.
ArtMethod* miranda_method = nullptr;
for (auto& mir_method : miranda_methods) {
if (interface_name_comparator.HasSameNameAndSignature(mir_method)) {
@@ -5191,9 +5302,10 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
}
}
- if (!miranda_methods.empty()) {
+ if (!miranda_methods.empty() || !default_methods.empty()) {
const size_t old_method_count = klass->NumVirtualMethods();
- const size_t new_method_count = old_method_count + miranda_methods.size();
+ const size_t new_method_count =
+ old_method_count + miranda_methods.size() + default_methods.size();
// Attempt to realloc to save RAM if possible.
LengthPrefixedArray<ArtMethod>* old_virtuals = klass->GetVirtualMethodsPtr();
// The realloced virtual methods aren't visible from the class roots, so there is no issue
@@ -5228,13 +5340,36 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count);
+ StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment)
+ + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
- out->CopyFrom(mir_method, image_pointer_size_);
- out->SetAccessFlags(out->GetAccessFlags() | kAccMiranda);
- move_table.emplace(mir_method, &*out);
+ ArtMethod& new_method = *out;
+ new_method.CopyFrom(mir_method, image_pointer_size_);
+ new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda);
+ DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
+ << "Miranda method should be abstract!";
+ move_table.emplace(mir_method, &new_method);
+ ++out;
+ }
+ // We need to copy the default methods into our own virtual method table since the runtime
+ // requires that every method on a class's vtable be in that respective class's virtual method
+ // table.
+ // NOTE This means that two classes might have the same implementation of a method from the same
+ // interface but will have different ArtMethod*s for them. This also means we cannot compare a
+ // default method found on a class with one found on the declaring interface directly and must
+ // look at the declaring class to determine if they are the same.
+ for (ArtMethod* def_method : default_methods) {
+ ArtMethod& new_method = *out;
+ new_method.CopyFrom(def_method, image_pointer_size_);
+ new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccDefault);
+ // Clear the preverified flag if it is present. Since this class hasn't been verified yet, it
+ // shouldn't have methods that are preverified.
+ // TODO This is rather arbitrary. We should maybe support classes where only some of their
+ // methods are preverified.
+ new_method.SetAccessFlags(new_method.GetAccessFlags() & ~kAccPreverified);
+ move_table.emplace(def_method, &new_method);
++out;
}
virtuals->SetLength(new_method_count);
@@ -5244,7 +5379,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
self->EndAssertNoThreadSuspension(old_cause);
const size_t old_vtable_count = vtable->GetLength();
- const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
+ const size_t new_vtable_count =
+ old_vtable_count + miranda_methods.size() + default_methods.size();
miranda_methods.clear();
vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
@@ -5261,15 +5397,29 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
++vtable_pos;
}
CHECK_EQ(vtable_pos, new_vtable_count);
- // Update old vtable methods.
+ // Update old vtable methods. We use the default_translations map to figure out what each
+ // vtable entry should be updated to, if it needs to be updated at all.
for (size_t i = 0; i < old_vtable_count; ++i) {
- auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
- DCHECK(m != nullptr) << PrettyClass(klass.Get());
- auto it = move_table.find(m);
+ ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
+ // Try and find what we need to change this method to.
+ auto translation_it = default_translations.find(i);
+ bool found_translation = false;
+ if (translation_it != default_translations.end()) {
+ size_t vtable_index;
+ std::tie(vtable_index, translated_method) = *translation_it;
+ DCHECK_EQ(vtable_index, i);
+ found_translation = true;
+ }
+ DCHECK(translated_method != nullptr);
+ auto it = move_table.find(translated_method);
if (it != move_table.end()) {
- auto* new_m = it->second;
- DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
- vtable->SetElementPtrSize(i, new_m, image_pointer_size_);
+ auto* new_method = it->second;
+ DCHECK(new_method != nullptr);
+ vtable->SetElementPtrSize(i, new_method, image_pointer_size_);
+ } else {
+ // If it was not going to be updated we wouldn't have put it into the default_translations
+ // map.
+ CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
}
}
@@ -5300,7 +5450,11 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
for (size_t i = 0, count = klass->GetDexCache()->NumResolvedMethods(); i < count; ++i) {
auto* m = mirror::DexCache::GetElementPtrSize(resolved_methods, i, image_pointer_size_);
- CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
+ // We don't remove default methods from the move table since we need them to update the
+ // vtable. Therefore just skip them for this check.
+ if (!m->IsDefault()) {
+ CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
+ }
}
}
// Put some random garbage in old virtuals to help find stale pointers.
@@ -6075,7 +6229,8 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
}
bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
- if (Runtime::Current()->UseJit()) {
+ Runtime* const runtime = Runtime::Current();
+ if (runtime->UseJit()) {
// JIT can have direct code pointers from any method to any other method.
return true;
}
@@ -6097,13 +6252,7 @@ bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
} else {
// The method can be called outside its own oat file. Therefore it won't be called using its
// direct code pointer only if all loaded oat files have been compiled in PIC mode.
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (const OatFile* oat_file : oat_files_) {
- if (!oat_file->IsPic()) {
- return true;
- }
- }
- return false;
+ return runtime->GetOatFileManager().HaveNonPicOatFile();
}
}
@@ -6138,9 +6287,13 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
for (const DexFile* dex_file : dex_files) {
StackHandleScope<3> hs2(self);
- Handle<mirror::LongArray> h_long_array = hs2.NewHandle(mirror::LongArray::Alloc(self, 1));
+ // CreatePathClassLoader is only used by gtests. Index 0 of h_long_array is supposed to be the
+ // oat file but we can leave it null.
+ Handle<mirror::LongArray> h_long_array = hs2.NewHandle(mirror::LongArray::Alloc(
+ self,
+ kDexFileIndexStart + 1));
DCHECK(h_long_array.Get() != nullptr);
- h_long_array->Set(0, reinterpret_cast<intptr_t>(dex_file));
+ h_long_array->Set(kDexFileIndexStart, reinterpret_cast<intptr_t>(dex_file));
Handle<mirror::Object> h_dex_file = hs2.NewHandle(
cookie_field->GetDeclaringClass()->AllocObject(self));
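
A standalone sketch of the IMT-slot update that SetIMTRef factors out of the loop above.
The types are illustrative stand-ins rather than the real ART classes, and the
name-and-signature comparison the real code performs before demoting an entry to the
conflict stub is omitted for brevity:

  #include <cstddef>

  struct Method {};  // stand-in for ArtMethod

  // An IMT slot holds the "unimplemented" sentinel, a single concrete method,
  // or the "conflict" trampoline once two different methods hash to the slot.
  void SetImtSlot(Method* unimplemented, Method* conflict,
                  Method* target, Method** imt_slot) {
    if (*imt_slot == unimplemented) {
      *imt_slot = target;    // First implementation seen for this slot.
    } else if (*imt_slot != conflict) {
      *imt_slot = conflict;  // A second, different method: demote to conflict.
    }                        // Already a conflict: leave it alone.
  }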
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 7f3e93806e..a70967d49b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_LINKER_H_
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -318,24 +319,17 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- mirror::DexCache* RegisterDexFile(const DexFile& dex_file)
+ mirror::DexCache* RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- const OatFile* RegisterOatFile(const OatFile* oat_file)
- REQUIRES(!dex_lock_);
-
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
}
- // Returns the first non-image oat file in the class path.
- const OatFile* GetPrimaryOatFile()
- REQUIRES(!dex_lock_);
-
void VisitClasses(ClassVisitor* visitor)
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -364,26 +358,6 @@ class ClassLinker {
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Finds or creates the oat file holding dex_location. Then loads and returns
- // all corresponding dex files (there may be more than one dex file loaded
- // in the case of multidex).
- // This may return the original, unquickened dex files if the oat file could
- // not be generated.
- //
- // Returns an empty vector if the dex files could not be loaded. In this
- // case, there will be at least one error message returned describing why no
- // dex files could not be loaded. The 'error_msgs' argument must not be
- // null, regardless of whether there is an error or not.
- //
- // This method should not be called with the mutator_lock_ held, because it
- // could end up starving GC if we need to generate or relocate any oat
- // files.
- std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
- const char* dex_location,
- const char* oat_location,
- std::vector<std::string>* error_msgs)
- REQUIRES(!dex_lock_, !Locks::mutator_lock_);
-
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
@@ -558,6 +532,12 @@ class ClassLinker {
static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
+ // set it. TODO: Consider using a lock other than classlinker_classes_lock_.
+ static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
private:
struct ClassLoaderData {
jweak weak_root; // Weak root to enable class unloading.
@@ -581,10 +561,6 @@ class ClassLinker {
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- OatFile& GetImageOatFile(gc::space::ImageSpace* space)
- REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
void FinishInit(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
@@ -600,7 +576,9 @@ class ClassLinker {
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
+ mirror::DexCache* AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -679,6 +657,12 @@ class ClassLinker {
bool can_init_parents)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
+ bool InitializeDefaultInterfaceRecursive(Thread* self,
+ Handle<mirror::Class> klass,
+ bool can_run_clinit,
+ bool can_init_parents)
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock);
@@ -718,12 +702,65 @@ class ClassLinker {
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
+ // Links the virtual methods for the given class and records any default methods that will need to
+ // be updated later.
+ //
+ // Arguments:
+ // * self - The current thread.
+ // * klass - The class whose vtable will be filled in.
+ // * default_translations - Map from vtable index to the new default method.
+ // Any vtable entries that need to be updated with new default methods
+ // are stored in this map, keyed on the vtable index that needs to be
+ // updated. We use this map because if we override a default method with
+ // another default method, we need to update the vtable to point to the
+ // new method. Unfortunately, since we copy the ArtMethod*, we cannot just
+ // do a simple scan; we therefore record the vtable indices that might
+ // need updating along with the method each will turn into.
+ // TODO This whole default_translations thing is very dirty. There should be a better way.
+ bool LinkVirtualMethods(Thread* self,
+ Handle<mirror::Class> klass,
+ /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Sets up the interface lookup table (IFTable) in the correct order to allow searching for
+ // default methods.
+ bool SetupInterfaceLookupTable(Thread* self,
+ Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Find the default method implementation for 'target_method' in 'klass', if one exists.
+ //
+ // Arguments:
+ // * self - The current thread.
+ // * target_method - The method we are trying to find a default implementation for.
+ // * klass - The class we are searching for a definition of target_method.
+ // * out_default_method - The pointer we will store the found default method to on success.
+ // * icce_message - A string into which we will store an appropriate
+ // IncompatibleClassChangeError message in case of failure. Note we must do it
+ // this way since we do not know whether we can safely allocate the exception
+ // object here; allocating it could cause us to go to sleep.
+ //
+ // Return value:
+ // * True - There were no conflicting method implementations found in the class while searching
+ // for target_method. The default method implementation is stored into out_default_method
+ // if it was found. Otherwise *out_default_method will be set to nullptr.
+ // * False - Conflicting method implementations were found when searching for target_method. The
+ // value of *out_default_method is undefined and *icce_message is a string that should
+ // be used to create an IncompatibleClassChangeError as soon as possible.
+ bool FindDefaultMethodImplementation(Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method,
+ /*out*/std::string* icce_message) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Sets the imt entries and fixes up the vtable for the given class by linking all the interface
+ // methods. See LinkVirtualMethods for an explanation of what default_translations is.
bool LinkInterfaceMethods(Thread* self,
Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ const std::unordered_map<size_t, ArtMethod*>& default_translations,
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -758,12 +795,6 @@ class ClassLinker {
return dex_caches_;
}
- const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
- REQUIRES(!dex_lock_);
-
- // Returns the boot image oat file.
- const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_);
-
void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
SHARED_REQUIRES(Locks::mutator_lock_);
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
@@ -813,9 +844,6 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
- // Check for duplicate class definitions of the given oat file against all open oat files.
- bool HasCollisions(const OatFile* oat_file, std::string* error_msg) REQUIRES(!dex_lock_);
-
bool HasInitWithString(Thread* self, const char* descriptor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
@@ -834,7 +862,6 @@ class ClassLinker {
// JNI weak globals to allow dex caches to get unloaded. We lazily delete weak globals when we
// register new dex files.
std::list<jweak> dex_caches_ GUARDED_BY(dex_lock_);
- std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// This contains the class loaders which have class tables. It is populated by
// InsertClassTableForClassLoader.
@@ -880,8 +907,8 @@ class ClassLinker {
// Image pointer size.
size_t image_pointer_size_;
+ friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
- friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class JniInternalTest; // for GetRuntimeQuickGenericJniStub
ART_FRIEND_TEST(mirror::DexCacheTest, Open); // for AllocDexCache
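
The default_translations contract described above boils down to a keyed rewrite of the
vtable. A minimal sketch with plain std:: containers standing in for the mirror:: types
(illustrative only, not the actual ART data structures):

  #include <cstddef>
  #include <unordered_map>
  #include <vector>

  struct Method {};  // stand-in for ArtMethod

  // Apply "vtable index -> replacement method" translations, mirroring the
  // update loop in LinkInterfaceMethods.
  void ApplyTranslations(std::vector<Method*>* vtable,
                         const std::unordered_map<std::size_t, Method*>& translations) {
    for (std::size_t i = 0; i < vtable->size(); ++i) {
      auto it = translations.find(i);
      if (it != translations.end()) {
        (*vtable)[i] = it->second;  // Point the slot at the newly chosen default.
      }
    }
  }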
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 56c5d1a2c3..b6b514177a 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -42,6 +42,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mem_map.h"
+#include "native/dalvik_system_DexFile.h"
#include "noop_compiler_callbacks.h"
#include "os.h"
#include "primitive.h"
@@ -516,7 +517,7 @@ std::vector<const DexFile*> CommonRuntimeTest::GetDexFiles(jobject jclass_loader
mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
DCHECK(long_array != nullptr);
int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
+ for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
long_array->GetWithoutChecks(j)));
if (cp_dex_file == nullptr) {
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 09416cc5c4..a5f9d09900 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -23,7 +23,9 @@
#include "base/stringprintf.h"
#include "dex_file-inl.h"
+#include "experimental_flags.h"
#include "leb128.h"
+#include "runtime.h"
#include "safe_map.h"
#include "utf-inl.h"
#include "utils.h"
@@ -2530,7 +2532,14 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
}
// Only the static initializer may have code in an interface.
- if (((class_access_flags & kAccInterface) != 0) && !is_clinit_by_name) {
+ // TODO We should have some way to determine whether to allow this experimental flag
+ // without the runtime being started.
+ // We assume experimental flags are enabled when running without a runtime to enable tools like
+ // dexdump to handle dex files with these features.
+ if (((class_access_flags & kAccInterface) != 0)
+ && !is_clinit_by_name
+ && Runtime::Current() != nullptr
+ && !Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
*error_msg = StringPrintf("Non-clinit interface method %" PRIu32 " should not have code",
method_index);
return false;
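
The new guard above, reduced to its boolean shape (a sketch, not the verifier's actual
interface): code in a non-clinit interface method is rejected only when a runtime exists
and the default-methods flag is off.

  // Returns true when the verifier should reject code in an interface method.
  bool RejectInterfaceMethodCode(bool is_interface, bool is_clinit_by_name,
                                 bool runtime_present, bool default_methods_enabled) {
    return is_interface && !is_clinit_by_name &&
           runtime_present && !default_methods_enabled;
  }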
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f66628d7cb..21e4e445e6 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -34,6 +34,7 @@
#include "mirror/throwable.h"
#include "nth_caller_visitor.h"
#include "runtime.h"
+#include "stack_map.h"
#include "thread.h"
namespace art {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index f1939993f7..17e6aac357 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,6 +16,7 @@
#include "entrypoints/entrypoint_utils.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/mutex.h"
@@ -358,16 +359,17 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+ ArtCode current_code = GetCallingCodeFrom(caller_sp);
ArtMethod* outer_method = *caller_sp;
ArtMethod* caller = outer_method;
- if ((outer_method != nullptr) && outer_method->IsOptimized(sizeof(void*))) {
+ if ((outer_method != nullptr) && current_code.IsOptimized(sizeof(void*))) {
const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = current_code.NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = current_code.GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 4217cab697..171ace27a5 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,6 +20,7 @@
#include <jni.h>
#include <stdint.h>
+#include "art_code.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_instruction.h"
@@ -184,6 +185,10 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check = false);
+inline ArtCode GetCallingCodeFrom(ArtMethod** sp) {
+ return ArtCode(sp);
+}
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 3bf001e249..4adb39b9c6 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -21,8 +21,9 @@
namespace art {
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
@@ -41,8 +42,9 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
}
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d3ac73d77..377675ea85 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_code.h"
#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
@@ -294,7 +295,8 @@ class QuickArgumentVisitor {
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
- CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
+ CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
+ GetCallingCodeFrom(sp).GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -320,12 +322,11 @@ class QuickArgumentVisitor {
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- ArtMethod* outer_method = *caller_sp;
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
- uintptr_t outer_pc_offset = outer_method->NativeQuickPcOffset(outer_pc);
+ uintptr_t outer_pc_offset = GetCallingCodeFrom(caller_sp).NativeQuickPcOffset(outer_pc);
- if (outer_method->IsOptimized(sizeof(void*))) {
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ if (GetCallingCodeFrom(caller_sp).IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetCallingCodeFrom(caller_sp).GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -336,7 +337,7 @@ class QuickArgumentVisitor {
return stack_map.GetDexPc(encoding);
}
} else {
- return outer_method->ToDexPc(outer_pc);
+ return GetCallingCodeFrom(caller_sp).ToDexPc(outer_pc);
}
}
@@ -719,7 +720,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
uint16_t num_regs = code_item->registers_size_;
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, nullptr, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -841,8 +842,9 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
- DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
+ DCHECK_EQ(GetCallingCodeFrom(sp).GetFrameSizeInBytes(),
+ ArtCode(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs))
+ .GetFrameSizeInBytes())
<< PrettyMethod(proxy_method);
self->VerifyStack();
// Start new JNI local reference state.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 0b366944e4..5299394d7c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -49,7 +49,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec << " ISA " << isa;
@@ -58,8 +58,8 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
- EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset)
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ EXPECT_EQ(ArtCode(save_method).GetReturnPcOffset().SizeValue(), pc_offset)
<< "Expected and real pc offset differs for " << type
<< " core spills=" << std::hex << frame_info.CoreSpillMask()
<< " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 9f84bd2a39..da1d80ea33 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -169,7 +169,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
ASSERT_EQ(kStackAlignment, 16U);
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
@@ -187,14 +187,14 @@ TEST_F(ExceptionTest, StackTraceElement) {
}
fake_stack.push_back(
- method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
fake_stack.push_back(
- method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
new file mode 100644
index 0000000000..2e674e95c6
--- /dev/null
+++ b/runtime/experimental_flags.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
+#define ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
+
+#include <ostream>
+
+namespace art {
+
+// Possible experimental features that might be enabled.
+struct ExperimentalFlags {
+ // The actual flag values.
+ enum {
+ kNone = 0x0000,
+ kLambdas = 0x0001,
+ kDefaultMethods = 0x0002,
+ };
+
+ constexpr ExperimentalFlags() : value_(0x0000) {}
+ constexpr ExperimentalFlags(decltype(kNone) t) : value_(static_cast<uint32_t>(t)) {}
+
+ constexpr operator decltype(kNone)() const {
+ return static_cast<decltype(kNone)>(value_);
+ }
+
+ constexpr explicit operator bool() const {
+ return value_ != kNone;
+ }
+
+ constexpr ExperimentalFlags operator|(const decltype(kNone)& b) const {
+ return static_cast<decltype(kNone)>(value_ | static_cast<uint32_t>(b));
+ }
+ constexpr ExperimentalFlags operator|(const ExperimentalFlags& b) const {
+ return static_cast<decltype(kNone)>(value_ | b.value_);
+ }
+
+ constexpr ExperimentalFlags operator&(const ExperimentalFlags& b) const {
+ return static_cast<decltype(kNone)>(value_ & b.value_);
+ }
+ constexpr ExperimentalFlags operator&(const decltype(kNone)& b) const {
+ return static_cast<decltype(kNone)>(value_ & static_cast<uint32_t>(b));
+ }
+
+ constexpr bool operator==(const ExperimentalFlags& b) const {
+ return value_ == b.value_;
+ }
+
+ private:
+ uint32_t value_;
+};
+
+inline std::ostream& operator<<(std::ostream& stream, const ExperimentalFlags& e) {
+ bool started = false;
+ if (e & ExperimentalFlags::kLambdas) {
+ stream << (started ? "|" : "") << "kLambdas";
+ started = true;
+ }
+ if (e & ExperimentalFlags::kDefaultMethods) {
+ stream << (started ? "|" : "") << "kDefaultMethods";
+ started = true;
+ }
+ if (!started) {
+ stream << "kNone";
+ }
+ return stream;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const decltype(ExperimentalFlags::kNone)& e) {
+ return stream << ExperimentalFlags(e);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
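
A small usage sketch for the new flag type, assuming the header above is on the include
path (the main() harness is for illustration only):

  #include <iostream>
  // #include "experimental_flags.h"

  int main() {
    art::ExperimentalFlags flags(art::ExperimentalFlags::kLambdas);
    flags = flags | art::ExperimentalFlags::kDefaultMethods;  // member operator|
    if (flags & art::ExperimentalFlags::kDefaultMethods) {    // explicit operator bool
      std::cout << flags << '\n';  // prints "kLambdas|kDefaultMethods"
    }
    return 0;
  }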
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c3a962737f..30a0983986 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -20,6 +20,7 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
+#include "art_code.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "mirror/class.h"
@@ -359,16 +360,17 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
return false;
}
+ ArtCode art_code(method_obj);
+
// We can be certain that this is a method now. Check if we have a GC map
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj,
- sizeof(void*));
- uint32_t sought_offset = return_pc - reinterpret_cast<uintptr_t>(code);
+ uint32_t sought_offset = return_pc -
+ reinterpret_cast<uintptr_t>(art_code.GetQuickOatEntryPoint(sizeof(void*)));
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
- uint32_t dexpc = method_obj->ToDexPc(return_pc, false);
+ uint32_t dexpc = art_code.ToDexPc(return_pc, false);
VLOG(signals) << "dexpc: " << dexpc;
return !check_dex_pc || dexpc != DexFile::kDexNoIndex;
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1923d24805..ce64b10364 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -789,10 +789,13 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
CHECK(image_header.GetOatDataBegin() != nullptr);
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
+ OatFile* oat_file = OatFile::Open(oat_filename,
+ oat_filename,
+ image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
- nullptr, error_msg);
+ nullptr,
+ error_msg);
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
@@ -839,15 +842,13 @@ bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
return true;
}
-
const OatFile* ImageSpace::GetOatFile() const {
return oat_file_non_owned_;
}
-
-OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != nullptr);
- return oat_file_.release();
+std::unique_ptr<const OatFile> ImageSpace::ReleaseOatFile() {
+ CHECK(oat_file_ != nullptr);
+ return std::move(oat_file_);
}
void ImageSpace::Dump(std::ostream& os) const {
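
ReleaseOatFile now expresses the ownership hand-off in the type system rather than with a
raw release(). A stand-in illustration of the pattern (not the real OatFile API):

  #include <memory>
  #include <utility>

  struct File {};  // stand-in for OatFile

  class Space {
   public:
    Space() : oat_file_(new File) {}
    // After this returns, the space no longer owns the file; oat_file_ is null.
    std::unique_ptr<const File> ReleaseOatFile() { return std::move(oat_file_); }

   private:
    std::unique_ptr<const File> oat_file_;
  };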
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 215c18b8d9..99207426a0 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -62,9 +62,8 @@ class ImageSpace : public MemMapSpace {
const OatFile* GetOatFile() const;
// Releases the OatFile from the ImageSpace so it can be transferred to
- // the caller, presumably the ClassLinker.
- OatFile* ReleaseOatFile()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ // the caller, presumably the OatFileManager.
+ std::unique_ptr<const OatFile> ReleaseOatFile();
void VerifyImageAllocations()
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/image.cc b/runtime/image.cc
index 42b348ac58..192371fe75 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '2', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index deada4c5dc..2dd2b7d403 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,6 +19,7 @@
#include <sstream>
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "atomic.h"
#include "class_linker.h"
@@ -251,7 +252,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
instrumentation_stack_->insert(it, instrumentation_frame);
SetReturnPc(instrumentation_exit_pc_);
}
- dex_pcs_.push_back(m->ToDexPc(last_return_pc_));
+ dex_pcs_.push_back(GetCurrentCode().ToDexPc(last_return_pc_));
last_return_pc_ = return_pc;
++instrumentation_stack_depth_;
return true; // Continue.
@@ -960,6 +961,15 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread,
}
}
+// Computes a frame ID by ignoring inlined frames.
+size_t Instrumentation::ComputeFrameId(Thread* self,
+ size_t frame_depth,
+ size_t inlined_frames_before_frame) {
+ CHECK_GE(frame_depth, inlined_frames_before_frame);
+ size_t no_inline_depth = frame_depth - inlined_frames_before_frame;
+ return StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) - no_inline_depth;
+}
+
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
SHARED_REQUIRES(Locks::mutator_lock_) {
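
The arithmetic in ComputeFrameId, as a self-contained sketch: frame IDs are counted from
the bottom of the stack and skip inlined frames, so two walkers that disagree on inlining
still agree on the ID. Here total_frames stands in for the StackVisitor::ComputeNumFrames
call:

  #include <cassert>
  #include <cstddef>

  std::size_t ComputeFrameIdSketch(std::size_t total_frames,
                                   std::size_t frame_depth,
                                   std::size_t inlined_frames_before_frame) {
    assert(frame_depth >= inlined_frames_before_frame);
    std::size_t no_inline_depth = frame_depth - inlined_frames_before_frame;
    return total_frames - no_inline_depth;  // Depth from the bottom of the stack.
  }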
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 612ca14cf5..8dd2357e06 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -397,6 +397,11 @@ class Instrumentation {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_);
+ static size_t ComputeFrameId(Thread* self,
+ size_t frame_depth,
+ size_t inlined_frames_before_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2a76f94688..7c0594a8bb 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -333,7 +333,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
// Set up shadow frame with matching number of reference slots to vregs.
ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
self->PushShadowFrame(shadow_frame);
@@ -458,10 +458,11 @@ void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* co
}
self->PushShadowFrame(shadow_frame);
+ ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
- const bool is_static = shadow_frame->GetMethod()->IsStatic();
+ const bool is_static = method->IsStatic();
if (is_static) {
- mirror::Class* declaring_class = shadow_frame->GetMethod()->GetDeclaringClass();
+ mirror::Class* declaring_class = method->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
StackHandleScope<1> hs(self);
HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 6f5b843ee4..44eb29ed2c 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -619,7 +619,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Allocate shadow frame on the stack.
const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, 0);
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
// Initialize new shadow frame by copying the registers from the callee shadow frame.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f57bddbb4f..8c495fc7bb 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -45,6 +45,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "stack.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -79,12 +80,28 @@ extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
-static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorEnter(self);
+template <bool kMonitorCounting>
+static inline void DoMonitorEnter(Thread* self,
+ ShadowFrame* frame,
+ Object* ref)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<1> hs(self);
+ Handle<Object> h_ref(hs.NewHandle(ref));
+ h_ref->MonitorEnter(self);
+ frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, h_ref.Get());
}
-static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorExit(self);
+template <bool kMonitorCounting>
+static inline void DoMonitorExit(Thread* self,
+ ShadowFrame* frame,
+ Object* ref)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<1> hs(self);
+ Handle<Object> h_ref(hs.NewHandle(ref));
+ h_ref->MonitorExit(self);
+ frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, h_ref.Get());
}
void AbortTransactionF(Thread* self, const char* fmt, ...)
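
The per-frame lock bookkeeping that DoMonitorEnter/DoMonitorExit now feed, as a
standalone sketch. The real LockCountData throws on a violation; this stand-in just
reports it:

  #include <algorithm>
  #include <vector>

  struct Object {};

  class LockCounts {
   public:
    void Add(Object* obj) { monitors_.push_back(obj); }
    // False means an unbalanced monitor-exit: obj was never entered in this frame.
    bool Remove(Object* obj) {
      auto it = std::find(monitors_.begin(), monitors_.end(), obj);
      if (it == monitors_.end()) return false;
      monitors_.erase(it);
      return true;
    }
    // False means structured locking was violated: a monitor is still held
    // when the method returns (the RETURN_* handlers above check this).
    bool AllReleased() const { return monitors_.empty(); }

   private:
    std::vector<Object*> monitors_;  // Entered but not yet exited.
  };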
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 9677d79de3..9766299ae0 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -19,6 +19,7 @@
#include "base/stl_util.h" // MakeUnique
+#include "experimental_flags.h"
#include "interpreter_common.h"
#include "safe_math.h"
@@ -83,12 +84,17 @@ namespace interpreter {
#define HANDLE_EXPERIMENTAL_INSTRUCTION_START(opcode) \
HANDLE_INSTRUCTION_START(opcode); \
DCHECK(inst->IsExperimental()); \
- if (Runtime::Current()->AreExperimentalLambdasEnabled()) {
+ if (Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas)) {
#define HANDLE_EXPERIMENTAL_INSTRUCTION_END() \
} else { \
UnexpectedOpcode(inst, shadow_frame); \
} HANDLE_INSTRUCTION_END();
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
/**
* Interpreter based on computed goto tables.
@@ -275,6 +281,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(RETURN_VOID_NO_BARRIER) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -289,6 +296,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -304,6 +312,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -318,6 +327,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -331,6 +341,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
if (do_assignability_check && obj_result != nullptr) {
@@ -468,7 +479,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -480,7 +491,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -2544,6 +2555,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
+ // Structured locking is to be enforced for abnormal termination, too.
+ shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self);
return JValue(); /* Handled in caller. */
} else {
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 083dfb5267..bf95a0e46f 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,6 +15,7 @@
*/
#include "base/stl_util.h" // MakeUnique
+#include "experimental_flags.h"
#include "interpreter_common.h"
#include "safe_math.h"
@@ -31,6 +32,9 @@ namespace interpreter {
inst->GetDexPc(insns), \
instrumentation); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
+ /* Structured locking is to be enforced for abnormal termination, too. */ \
+ shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
return JValue(); /* Handled in caller. */ \
} else { \
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
@@ -47,6 +51,12 @@ namespace interpreter {
} \
} while (false)
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
+
// Code to run before each dex instruction.
#define PREAMBLE() \
do { \
@@ -58,7 +68,7 @@ namespace interpreter {
static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
DCHECK(inst->IsExperimental());
- return Runtime::Current()->AreExperimentalLambdasEnabled();
+ return Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas);
}
template<bool do_access_check, bool transaction_active>
@@ -182,6 +192,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -194,6 +205,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -207,6 +219,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -219,6 +232,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -230,6 +244,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
@@ -366,7 +381,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
@@ -378,7 +393,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 6a6d1986dc..7b91b0b2b6 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -32,7 +32,7 @@ union PACKED(4) JValue {
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
- i = ((static_cast<int32_t>(new_b) << 24) >> 24); // Sign-extend.
+ j = ((static_cast<int64_t>(new_b) << 56) >> 56); // Sign-extend to 64 bits.
}
uint16_t GetC() const { return c; }
@@ -45,7 +45,9 @@ union PACKED(4) JValue {
void SetF(float new_f) { f = new_f; }
int32_t GetI() const { return i; }
- void SetI(int32_t new_i) { i = new_i; }
+ void SetI(int32_t new_i) {
+ j = ((static_cast<int64_t>(new_i) << 32) >> 32); // Sign-extend to 64 bits.
+ }
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
@@ -55,7 +57,7 @@ union PACKED(4) JValue {
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
- i = ((static_cast<int32_t>(new_s) << 16) >> 16); // Sign-extend.
+ j = ((static_cast<int64_t>(new_s) << 48) >> 48); // Sign-extend to 64 bits.
}
uint8_t GetZ() const { return z; }
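
Why the setters now widen through the 64-bit member: the shift pair arithmetically
sign-extends the narrow value, so the high bits of j are well-defined when the value is
read back via GetJ(). A quick check (relying on arithmetic right shifts, as the runtime
code itself does):

  #include <cassert>
  #include <cstdint>

  int main() {
    int8_t b = -1;
    int64_t j = (static_cast<int64_t>(b) << 56) >> 56;  // as in SetB()
    assert(j == -1);
    assert(j == static_cast<int64_t>(b));  // same result as the implicit conversion
    int32_t i = -42;
    j = (static_cast<int64_t>(i) << 32) >> 32;          // as in SetI()
    assert(j == -42);
    return 0;
  }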
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 2a019c5bae..2d3581da8f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -134,11 +134,25 @@ static uintptr_t GenerateNextMemPos() {
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif
-// Return true if the address range is contained in a single /proc/self/map entry.
-static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
- std::string* error_msg) {
+// Return true if the address range is contained in a single memory map, by either reading
+// the maps_ variable or the /proc/self/maps entries.
+bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
uintptr_t end = begin + size;
+
+ // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
+ // further.
+ {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ for (auto& pair : *maps_) {
+ MemMap* const map = pair.second;
+ if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
+ end <= reinterpret_cast<uintptr_t>(map->End())) {
+ return true;
+ }
+ }
+ }
+
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to build process map");
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 196a7f6292..7c11cebcef 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -161,6 +161,8 @@ class MemMap {
REQUIRES(Locks::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
REQUIRES(Locks::mem_maps_lock_);
+ static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
+ REQUIRES(!Locks::mem_maps_lock_);
const std::string name_;
uint8_t* const begin_; // Start of data.
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 93f2aea38e..a528c3b890 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -392,7 +392,8 @@ inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t
}
inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) {
- DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
+ // Only miranda or default methods may come from interfaces and be used as a virtual.
+ DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsDefault() || method->IsMiranda());
// The argument method may come from a super class.
// Use the index to a potentially overridden one for this instance's class.
return GetVTableEntry(method->GetMethodIndex(), pointer_size);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2668b3db9c..8219d69b6e 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -30,6 +30,7 @@
#include "primitive.h"
#include "read_barrier_option.h"
#include "stride_iterator.h"
+#include "thread.h"
#include "utils.h"
#ifndef IMT_SIZE
@@ -229,6 +230,18 @@ class MANAGED Class FINAL : public Object {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
+ ALWAYS_INLINE void SetRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ SetAccessFlags(flags | kAccRecursivelyInitialized);
+ }
+
+ ALWAYS_INLINE void SetHasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ SetAccessFlags(flags | kAccHasDefaultMethod);
+ }
+
ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
@@ -860,6 +873,14 @@ class MANAGED Class FINAL : public Object {
ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
+ }
+
+ bool HasBeenRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
+ }
+
ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8fb860fa6b..48f2ca59e8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -20,9 +20,8 @@
#include "class_linker.h"
#include "common_runtime_test.h"
-#include "gc/heap.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "linear_alloc.h"
+#include "mirror/class_loader-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -36,7 +35,9 @@ TEST_F(DexCacheTest, Open) {
StackHandleScope<1> hs(soa.Self());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
- hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(),
+ *java_lang_dex_file_,
+ Runtime::Current()->GetLinearAlloc())));
ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
@@ -45,5 +46,21 @@ TEST_F(DexCacheTest, Open) {
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
}
+TEST_F(DexCacheTest, LinearAlloc) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader(LoadDex("Main"));
+ ASSERT_TRUE(jclass_loader != nullptr);
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LMain;", class_loader);
+ ASSERT_TRUE(klass != nullptr);
+ LinearAlloc* const linear_alloc = klass->GetClassLoader()->GetAllocator();
+ EXPECT_NE(linear_alloc, runtime->GetLinearAlloc());
+ EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods()));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 50490bbcae..f75b8aeef4 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -137,9 +137,13 @@ class MANAGED LOCKABLE Object {
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
- mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
- EXCLUSIVE_LOCK_FUNCTION();
- bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* MonitorEnter(Thread* self)
+ EXCLUSIVE_LOCK_FUNCTION()
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MonitorExit(Thread* self)
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
UNLOCK_FUNCTION();
void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index f7ab10be9d..116cbe9254 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -49,8 +49,13 @@ static constexpr uint32_t kAccPreverified = 0x00080000; // class (runt
// method (dex only)
static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
+static constexpr uint32_t kAccDefault = 0x00400000; // method (runtime)
// Special runtime-only flags.
+// The interface and all of its super-interfaces with default methods have been recursively initialized.
+static constexpr uint32_t kAccRecursivelyInitialized = 0x20000000;
+// The interface declares at least one default method.
+static constexpr uint32_t kAccHasDefaultMethod = 0x40000000;
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
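The three new bits above live in the same 32-bit access_flags_ word as the dex-defined modifiers, in the runtime-only range the dex format never emits. A minimal sketch of the intended queries, assuming only the constants from this hunk (the free-function names are illustrative, not from the source):

    // Hypothetical helpers mirroring the accessors added to mirror::Class.
    inline bool IsDefaultMethod(uint32_t method_access_flags) {
      return (method_access_flags & kAccDefault) != 0;
    }
    inline bool HasDefaultMethods(uint32_t class_access_flags) {
      return (class_access_flags & kAccHasDefaultMethod) != 0;
    }
    inline bool HasBeenRecursivelyInitialized(uint32_t class_access_flags) {
      return (class_access_flags & kAccRecursivelyInitialized) != 0;
    }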
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index fa5841882e..81e7e6d675 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -274,7 +274,7 @@ void Monitor::Lock(Thread* self) {
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
const char* owners_filename;
- uint32_t owners_line_number;
+ int32_t owners_line_number;
TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
LOG(WARNING) << "Long monitor contention event with owner method="
@@ -696,6 +696,7 @@ static mirror::Object* FakeUnlock(mirror::Object* obj)
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
+ self->AssertThreadSuspensionIsAllowable();
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
@@ -771,6 +772,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
+ self->AssertThreadSuspensionIsAllowable();
obj = FakeUnlock(obj);
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -1084,7 +1086,7 @@ bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
}
void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc,
- const char** source_file, uint32_t* line_number) const {
+ const char** source_file, int32_t* line_number) const {
// If method is null, location is unknown
if (method == nullptr) {
*source_file = "";
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 8cd93c69d7..707d0f112c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -65,12 +65,16 @@ class Monitor {
// NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
EXCLUSIVE_LOCK_FUNCTION(obj)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
static bool MonitorExit(Thread* thread, mirror::Object* obj)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
SHARED_REQUIRES(Locks::mutator_lock_)
- UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS;
+ UNLOCK_FUNCTION(obj);
static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
DoNotify(self, obj, false);
@@ -179,7 +183,7 @@ class Monitor {
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number)
+ const char* owner_filename, int32_t owner_line_number)
SHARED_REQUIRES(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
@@ -231,7 +235,7 @@ class Monitor {
// Translates the provided method and pc into its declaring class' source file and line number.
void TranslateLocation(ArtMethod* method, uint32_t pc,
- const char** source_file, uint32_t* line_number) const
+ const char** source_file, int32_t* line_number) const
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
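The REQUIRES(!Roles::uninterruptible_) annotations are the static counterpart of the AssertThreadSuspensionIsAllowable() calls added in monitor.cc: both state that entering or exiting a monitor may suspend the thread, so the caller must not hold the uninterruptible role. A reduced illustration of such a negative capability using Clang's thread-safety attributes (compile with -Wthread-safety -Wthread-safety-negative; all names here are illustrative):

    // A "role" modeled as a capability; holding it means suspension is forbidden.
    class __attribute__((capability("role"))) UninterruptibleRole {};
    extern UninterruptibleRole uninterruptible;

    // May block or suspend: the analysis rejects callers that hold the role.
    void MonitorEnterLike() __attribute__((requires_capability(!uninterruptible)));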
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index efe2e823d9..82ef2d841a 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -50,7 +50,7 @@ static char* EventLogWriteString(char* dst, const char* value, size_t len) {
}
void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number) {
+ const char* owner_filename, int32_t owner_line_number) {
// Emit the event list length, 1 byte.
char eventBuffer[174];
char* cp = eventBuffer;
@@ -80,7 +80,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
const char* filename;
- uint32_t line_number;
+ int32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
cp = EventLogWriteString(cp, filename, strlen(filename));
diff --git a/runtime/monitor_linux.cc b/runtime/monitor_linux.cc
index 856ebe45f9..1c77ac0eb3 100644
--- a/runtime/monitor_linux.cc
+++ b/runtime/monitor_linux.cc
@@ -18,7 +18,7 @@
namespace art {
-void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, uint32_t) {
+void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, int32_t) {
}
} // namespace art
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4aebc2c35f..4eea3f39f7 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,6 +27,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "oat_file_assistant.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "profiler.h"
#include "runtime.h"
@@ -39,13 +40,16 @@
namespace art {
-static std::unique_ptr<std::vector<const DexFile*>>
-ConvertJavaArrayToNative(JNIEnv* env, jobject arrayObject) {
+static bool ConvertJavaArrayToDexFiles(
+ JNIEnv* env,
+ jobject arrayObject,
+ /*out*/ std::vector<const DexFile*>& dex_files,
+ /*out*/ const OatFile*& oat_file) {
jarray array = reinterpret_cast<jarray>(arrayObject);
jsize array_size = env->GetArrayLength(array);
if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
+ return false;
}
// TODO: Optimize. On 32bit we can use an int array.
@@ -53,27 +57,24 @@ ConvertJavaArrayToNative(JNIEnv* env, jobject arrayObject) {
jlong* long_data = env->GetLongArrayElements(reinterpret_cast<jlongArray>(array),
&is_long_data_copied);
if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
+ return false;
}
- std::unique_ptr<std::vector<const DexFile*>> ret(new std::vector<const DexFile*>());
- ret->reserve(array_size);
- for (jsize i = 0; i < array_size; ++i) {
- ret->push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(*(long_data + i))));
+ oat_file = reinterpret_cast<const OatFile*>(static_cast<uintptr_t>(long_data[kOatFileIndex]));
+ dex_files.reserve(array_size - 1);
+ for (jsize i = kDexFileIndexStart; i < array_size; ++i) {
+ dex_files.push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(long_data[i])));
}
env->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array), long_data, JNI_ABORT);
- if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
- }
-
- return ret;
+ return env->ExceptionCheck() != JNI_TRUE;
}
-static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
- std::vector<std::unique_ptr<const DexFile>>& vec) {
- size_t vec_size = vec.size();
- jlongArray long_array = env->NewLongArray(static_cast<jsize>(vec_size));
+static jlongArray ConvertDexFilesToJavaArray(JNIEnv* env,
+ const OatFile* oat_file,
+ std::vector<std::unique_ptr<const DexFile>>& vec) {
+ // Add one for the oat file.
+ jlongArray long_array = env->NewLongArray(static_cast<jsize>(kDexFileIndexStart + vec.size()));
if (env->ExceptionCheck() == JNI_TRUE) {
return nullptr;
}
@@ -84,10 +85,9 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
return nullptr;
}
- jlong* tmp = long_data;
- for (auto& dex_file : vec) {
- *tmp = reinterpret_cast<uintptr_t>(dex_file.get());
- tmp++;
+ long_data[kOatFileIndex] = reinterpret_cast<uintptr_t>(oat_file);
+ for (size_t i = 0; i < vec.size(); ++i) {
+ long_data[kDexFileIndexStart + i] = reinterpret_cast<uintptr_t>(vec[i].get());
}
env->ReleaseLongArrayElements(long_array, long_data, 0);
@@ -160,14 +160,19 @@ static jobject DexFile_openDexFileNative(
return 0;
}
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* linker = runtime->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
+ const OatFile* oat_file = nullptr;
- dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs);
+ dex_files = runtime->GetOatFileManager().OpenDexFilesFromOat(sourceName.c_str(),
+ outputName.c_str(),
+ /*out*/ &oat_file,
+ /*out*/ &error_msgs);
if (!dex_files.empty()) {
- jlongArray array = ConvertNativeToJavaArray(env, dex_files);
+ jlongArray array = ConvertDexFilesToJavaArray(env, oat_file, dex_files);
if (array == nullptr) {
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
@@ -193,43 +198,55 @@ static jobject DexFile_openDexFileNative(
}
static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
- ScopedObjectAccess soa(env);
- mirror::Object* dex_files_object = soa.Decode<mirror::Object*>(cookie);
- if (dex_files_object == nullptr) {
- ThrowNullPointerException("cookie == null");
+ std::vector<const DexFile*> dex_files;
+ const OatFile* oat_file;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, dex_files, oat_file)) {
+ Thread::Current()->AssertPendingException();
return JNI_FALSE;
}
- mirror::LongArray* dex_files = dex_files_object->AsLongArray();
-
- // Delete dex files associated with this dalvik.system.DexFile since there should not be running
- // code using it. dex_files is a vector due to multidex.
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
bool all_deleted = true;
- for (int32_t i = 0, count = dex_files->GetLength(); i < count; ++i) {
- auto* dex_file = reinterpret_cast<DexFile*>(dex_files->Get(i));
- if (dex_file == nullptr) {
- continue;
- }
- // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
- // are calls to DexFile.close while the ART DexFile is still in use.
- if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
- // Clear the element in the array so that we can call close again.
- dex_files->Set(i, 0);
- delete dex_file;
- } else {
- all_deleted = false;
+ {
+ ScopedObjectAccess soa(env);
+ mirror::Object* dex_files_object = soa.Decode<mirror::Object*>(cookie);
+ mirror::LongArray* long_dex_files = dex_files_object->AsLongArray();
+ // Delete the dex files associated with this dalvik.system.DexFile since there should be no
+ // running code using them. dex_files is a vector due to multidex.
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ int32_t i = kDexFileIndexStart; // Oat file is at index 0.
+ for (const DexFile* dex_file : dex_files) {
+ if (dex_file != nullptr) {
+ // Only delete the dex file if its dex cache is not found, to prevent runtime crashes when
+ // DexFile.close is called while the ART DexFile is still in use.
+ if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
+ // Clear the element in the array so that we can call close again.
+ long_dex_files->Set(i, 0);
+ delete dex_file;
+ } else {
+ all_deleted = false;
+ }
+ }
+ ++i;
}
}
- // TODO: Also unmap the OatFile for this dalvik.system.DexFile.
-
+ // oat_file can be null if we are running without dex2oat.
+ if (all_deleted && oat_file != nullptr) {
+ // If all of the dex files are no longer in use we can unmap the corresponding oat file.
+ VLOG(class_linker) << "Unregistering " << oat_file;
+ runtime->GetOatFileManager().UnRegisterAndDeleteOatFile(oat_file);
+ }
return all_deleted ? JNI_TRUE : JNI_FALSE;
}
-static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader,
+static jclass DexFile_defineClassNative(JNIEnv* env,
+ jclass,
+ jstring javaName,
+ jobject javaLoader,
jobject cookie) {
- std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
- if (dex_files.get() == nullptr) {
+ std::vector<const DexFile*> dex_files;
+ const OatFile* oat_file;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /*out*/ dex_files, /*out*/ oat_file)) {
VLOG(class_linker) << "Failed to find dex_file";
DCHECK(env->ExceptionCheck());
return nullptr;
@@ -242,17 +259,23 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
- for (auto& dex_file : *dex_files) {
+ for (auto& dex_file : dex_files) {
const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor.c_str(), hash);
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->RegisterDexFile(*dex_file);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
- mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(), hash,
- class_loader, *dex_file, *dex_class_def);
+ class_linker->RegisterDexFile(
+ *dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()));
+ mirror::Class* result = class_linker->DefineClass(soa.Self(),
+ descriptor.c_str(),
+ hash,
+ class_loader,
+ *dex_file,
+ *dex_class_def);
if (result != nullptr) {
VLOG(class_linker) << "DexFile_defineClassNative returning " << result
<< " for " << class_name.c_str();
@@ -273,8 +296,9 @@ struct CharPointerComparator {
// Note: this can be an expensive call, as we sort out duplicates in MultiDex files.
static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie) {
- std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
- if (dex_files.get() == nullptr) {
+ const OatFile* oat_file = nullptr;
+ std::vector<const DexFile*> dex_files;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /*out*/ dex_files, /*out*/ oat_file)) {
DCHECK(env->ExceptionCheck());
return nullptr;
}
@@ -282,7 +306,7 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
// Push all class descriptors into a set. Use set instead of unordered_set as we want to
// retrieve all in the end.
std::set<const char*, CharPointerComparator> descriptors;
- for (auto& dex_file : *dex_files) {
+ for (auto& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
@@ -291,7 +315,8 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
}
// Now create output array and copy the set into it.
- jobjectArray result = env->NewObjectArray(descriptors.size(), WellKnownClasses::java_lang_String,
+ jobjectArray result = env->NewObjectArray(descriptors.size(),
+ WellKnownClasses::java_lang_String,
nullptr);
if (result != nullptr) {
auto it = descriptors.begin();
@@ -309,9 +334,11 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
return result;
}
-static jint GetDexOptNeeded(JNIEnv* env, const char* filename,
- const char* pkgname, const char* instruction_set, const jboolean defer) {
-
+static jint GetDexOptNeeded(JNIEnv* env,
+ const char* filename,
+ const char* pkgname,
+ const char* instruction_set,
+ const jboolean defer) {
if ((filename == nullptr) || !OS::FileExists(filename)) {
LOG(ERROR) << "DexFile_getDexOptNeeded file '" << filename << "' does not exist";
ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
@@ -361,8 +388,12 @@ static jint GetDexOptNeeded(JNIEnv* env, const char* filename,
return oat_file_assistant.GetDexOptNeeded();
}
-static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
- jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
+static jint DexFile_getDexOptNeeded(JNIEnv* env,
+ jclass,
+ jstring javaFilename,
+ jstring javaPkgname,
+ jstring javaInstructionSet,
+ jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
if (env->ExceptionCheck()) {
return 0;
@@ -375,8 +406,11 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
return 0;
}
- return GetDexOptNeeded(env, filename.c_str(), pkgname.c_str(),
- instruction_set.c_str(), defer);
+ return GetDexOptNeeded(env,
+ filename.c_str(),
+ pkgname.c_str(),
+ instruction_set.c_str(),
+ defer);
}
// public API, null pkgname
diff --git a/runtime/native/dalvik_system_DexFile.h b/runtime/native/dalvik_system_DexFile.h
index 7585ab972c..77d219dfad 100644
--- a/runtime/native/dalvik_system_DexFile.h
+++ b/runtime/native/dalvik_system_DexFile.h
@@ -18,9 +18,13 @@
#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
#include <jni.h>
+#include <unistd.h>
namespace art {
+constexpr size_t kOatFileIndex = 0;
+constexpr size_t kDexFileIndexStart = 1;
+
class DexFile;
void register_dalvik_system_DexFile(JNIEnv* env);
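With these constants, the Java-side cookie becomes a jlongArray whose element kOatFileIndex holds the OatFile pointer and whose remaining elements hold DexFile pointers, matching ConvertDexFilesToJavaArray and ConvertJavaArrayToDexFiles above. A self-contained sketch of the packing convention, independent of JNI (names are illustrative):

    #include <cstdint>
    #include <vector>

    std::vector<int64_t> MakeCookie(const void* oat_file,
                                    const std::vector<const void*>& dex_files) {
      std::vector<int64_t> cookie(kDexFileIndexStart + dex_files.size());
      cookie[kOatFileIndex] =
          static_cast<int64_t>(reinterpret_cast<uintptr_t>(oat_file));
      for (std::size_t i = 0; i < dex_files.size(); ++i) {
        cookie[kDexFileIndexStart + i] =
            static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_files[i]));
      }
      return cookie;
    }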
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4f957233c4..4c5dc3ad25 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -497,7 +497,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc())));
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5da15df25b..3a73900efa 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -522,6 +522,10 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
}
if (classes == nullptr) {
// Return an empty array instead of a null pointer.
+ if (soa.Self()->IsExceptionPending()) {
+ // Pending exception from GetDeclaredClasses.
+ return nullptr;
+ }
mirror::Class* class_class = mirror::Class::GetJavaLangClass();
mirror::Class* class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 5725b6ff6c..5625499848 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -134,6 +134,9 @@ bool OatHeader::IsValid() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return false;
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return false;
+ }
return true;
}
@@ -156,6 +159,9 @@ std::string OatHeader::GetValidationErrorMessage() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return "Image patch delta not page-aligned.";
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return StringPrintf("Invalid instruction set, %d.", static_cast<int>(instruction_set_));
+ }
return "";
}
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 5df652579f..f7913e177a 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -22,7 +22,7 @@
namespace art {
inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -47,7 +47,7 @@ inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
}
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -55,7 +55,7 @@ inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
}
inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -63,7 +63,7 @@ inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
}
inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -71,7 +71,7 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
}
inline const uint8_t* OatFile::OatMethod::GetGcMap() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -122,7 +122,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
}
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -134,7 +134,7 @@ inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
}
inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -146,7 +146,7 @@ inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
}
inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return 0u;
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a4a159e0da..d931777e41 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -42,9 +42,11 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "oat_file-inl.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "runtime.h"
#include "utils.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "vmap_table.h"
namespace art {
@@ -115,15 +117,35 @@ OatFile* OatFile::Open(const std::string& filename,
// TODO: Also try when not executable? The issue here could be re-mapping as writable (as
// !executable is a sign that we may want to patch), which may not be allowed for
// various reasons.
- if (kUseDlopen && (kIsTargetBuild || kUseDlopenOnHost) && executable) {
- // Try to use dlopen. This may fail for various reasons, outlined below. We try dlopen, as
- // this will register the oat file with the linker and allows libunwind to find our info.
- ret.reset(OpenDlopen(filename, location, requested_base, abs_dex_location, error_msg));
- if (ret.get() != nullptr) {
- return ret.release();
+ // dlopen always returns the same library if it is already opened on the host. For this reason
+ // we only use dlopen if we are the target or we do not already have the dex file opened. Having
+ // the same library loaded multiple times at different addresses is required for class unloading
+ // and for having dex cache arrays in the .bss section.
+ Runtime* const runtime = Runtime::Current();
+ OatFileManager* const manager = (runtime != nullptr) ? &runtime->GetOatFileManager() : nullptr;
+ if (kUseDlopen && executable) {
+ bool success = kIsTargetBuild;
+ bool reserved_location = false;
+ // Manager may be null if we are running without a runtime.
+ if (!success && kUseDlopenOnHost && manager != nullptr) {
+ // RegisterOatFileLocation returns false if we are not the first caller to register that
+ // location.
+ reserved_location = manager->RegisterOatFileLocation(location);
+ success = reserved_location;
}
- if (kPrintDlOpenErrorMessage) {
- LOG(ERROR) << "Failed to dlopen: " << *error_msg;
+ if (success) {
+ // Try to use dlopen. This may fail for various reasons, outlined below. We try dlopen, as
+ // this will register the oat file with the linker and allows libunwind to find our info.
+ ret.reset(OpenDlopen(filename, location, requested_base, abs_dex_location, error_msg));
+ if (reserved_location) {
+ manager->UnRegisterOatFileLocation(location);
+ }
+ if (ret != nullptr) {
+ return ret.release();
+ }
+ if (kPrintDlOpenErrorMessage) {
+ LOG(ERROR) << "Failed to dlopen: " << *error_msg;
+ }
}
}
@@ -204,6 +226,10 @@ OatFile::OatFile(const std::string& location, bool is_executable)
is_executable_(is_executable), dlopen_handle_(nullptr),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
+ Runtime* const runtime = Runtime::Current();
+ if (runtime != nullptr && !runtime->IsAotCompiler()) {
+ runtime->GetOatFileManager().RegisterOatFileLocation(location);
+ }
}
OatFile::~OatFile() {
@@ -211,6 +237,10 @@ OatFile::~OatFile() {
if (dlopen_handle_ != nullptr) {
dlclose(dlopen_handle_);
}
+ Runtime* const runtime = Runtime::Current();
+ if (runtime != nullptr && !runtime->IsAotCompiler()) {
+ runtime->GetOatFileManager().UnRegisterOatFileLocation(location_);
+ }
}
bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
@@ -403,6 +433,8 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
+ size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ const uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
@@ -484,6 +516,22 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
+ const uint8_t* current_dex_cache_arrays = nullptr;
+ if (dex_cache_arrays != nullptr) {
+ DexCacheArraysLayout layout(pointer_size, *header);
+ if (layout.Size() != 0u) {
+ if (static_cast<size_t>(bss_end_ - dex_cache_arrays) < layout.Size()) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with "
+ "truncated dex cache arrays, %zd < %zd.",
+ GetLocation().c_str(), i, dex_file_location.c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays), layout.Size());
+ return false;
+ }
+ current_dex_cache_arrays = dex_cache_arrays;
+ dex_cache_arrays += layout.Size();
+ }
+ }
+
std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
// Create the OatDexFile and add it to the owning container.
@@ -492,7 +540,8 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
canonical_location,
dex_file_checksum,
dex_file_pointer,
- methods_offsets_pointer);
+ methods_offsets_pointer,
+ current_dex_cache_arrays);
oat_dex_files_storage_.push_back(oat_dex_file);
// Add the location and canonical location (if different) to the oat_dex_files_ table.
@@ -503,6 +552,15 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
oat_dex_files_.Put(canonical_key, oat_dex_file);
}
}
+
+ if (dex_cache_arrays != bss_end_) {
+ // We expect the bss section to be either empty (dex_cache_arrays and bss_end_
+ // both null) or contain just the dex cache arrays and nothing else.
+ *error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zd bytes.",
+ GetLocation().c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays));
+ return false;
+ }
return true;
}
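The checks above enforce a simple carving invariant: each dex file's dex cache arrays chunk is taken from the front of [bss_begin_, bss_end_) in dex-file order, and after the last chunk the cursor must land exactly on bss_end_. A standalone sketch of that arithmetic, with chunk sizes standing in for DexCacheArraysLayout::Size():

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Returns true if the chunks tile the region exactly, as Setup() requires.
    bool CarveBss(const uint8_t* begin, const uint8_t* end,
                  const std::vector<size_t>& chunk_sizes) {
      const uint8_t* cursor = begin;
      for (size_t size : chunk_sizes) {
        if (size == 0u) {
          continue;  // This dex file needs no dex cache arrays.
        }
        if (static_cast<size_t>(end - cursor) < size) {
          return false;  // Truncated dex cache arrays.
        }
        cursor += size;
      }
      return cursor == end;  // .bss must hold the arrays and nothing else.
    }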
@@ -605,13 +663,15 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer)
+ const uint32_t* oat_class_offsets_pointer,
+ const uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
canonical_dex_file_location_(canonical_dex_file_location),
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
- oat_class_offsets_pointer_(oat_class_offsets_pointer) {}
+ oat_class_offsets_pointer_(oat_class_offsets_pointer),
+ dex_cache_arrays_(dex_cache_arrays) {}
OatFile::OatDexFile::~OatDexFile() {}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 27f8677f03..34f014123b 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,6 +29,7 @@
#include "mirror/class.h"
#include "oat.h"
#include "os.h"
+#include "utils.h"
namespace art {
@@ -395,6 +396,10 @@ class OatDexFile FINAL {
// Returns the offset to the OatClass information. Most callers should use GetOatClass.
uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+ const uint8_t* GetDexCacheArrays() const {
+ return dex_cache_arrays_;
+ }
+
~OatDexFile();
private:
@@ -403,7 +408,8 @@ class OatDexFile FINAL {
const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer);
+ const uint32_t* oat_class_offsets_pointer,
+ const uint8_t* dex_cache_arrays);
const OatFile* const oat_file_;
const std::string dex_file_location_;
@@ -411,6 +417,7 @@ class OatDexFile FINAL {
const uint32_t dex_file_location_checksum_;
const uint8_t* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
+ const uint8_t* const dex_cache_arrays_;
friend class OatFile;
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 20347a9063..cef8702937 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -31,6 +31,7 @@
#include "compiler_callbacks.h"
#include "gc/space/image_space.h"
#include "mem_map.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -958,13 +959,18 @@ class RaceGenerateTask : public Task {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs);
+ const OatFile* oat_file = nullptr;
+ dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
+ dex_location_.c_str(),
+ oat_location_.c_str(),
+ &oat_file,
+ &error_msgs);
CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+ CHECK_EQ(loaded_oat_file_, oat_file);
}
const OatFile* GetLoadedOatFile() const {
@@ -980,8 +986,9 @@ class RaceGenerateTask : public Task {
// Test the case where multiple processes race to generate an oat file.
// This simulates multiple processes using multiple threads.
//
-// We want only one Oat file to be loaded when there is a race to load, to
-// avoid using up the virtual memory address space.
+// We want unique Oat files to be loaded even when there is a race to load.
+// TODO: The test case no longer tests locking the way it was intended since we now get multiple
+// copies of the same Oat files mapped at different locations.
TEST_F(OatFileAssistantTest, RaceToGenerate) {
std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
@@ -1002,10 +1009,12 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
thread_pool.StartWorkers(self);
thread_pool.Wait(self, true, false);
- // Verify every task got the same pointer.
- const OatFile* expected = tasks[0]->GetLoadedOatFile();
+ // Verify every task got a unique oat file.
+ std::set<const OatFile*> oat_files;
for (auto& task : tasks) {
- EXPECT_EQ(expected, task->GetLoadedOatFile());
+ const OatFile* oat_file = task->GetLoadedOatFile();
+ EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
+ oat_files.insert(oat_file);
}
}
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
new file mode 100644
index 0000000000..9eee156bb0
--- /dev/null
+++ b/runtime/oat_file_manager.cc
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_manager.h"
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "dex_file-inl.h"
+#include "gc/space/image_space.h"
+#include "oat_file_assistant.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// For b/21333911.
+// Only enabled for debug builds to prevent bit rot. There are too many performance regressions for
+// normal builds.
+static constexpr bool kDuplicateClassesCheck = kIsDebugBuild;
+
+const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ DCHECK(oat_file != nullptr);
+ if (kIsDebugBuild) {
+ CHECK(oat_files_.find(oat_file) == oat_files_.end());
+ for (const std::unique_ptr<const OatFile>& existing : oat_files_) {
+ CHECK_NE(oat_file.get(), existing.get()) << oat_file->GetLocation();
+ // Check that we don't have an oat file with the same address. Copies of the same oat file
+ // should be loaded at different addresses.
+ CHECK_NE(oat_file->Begin(), existing->Begin()) << "Oat file already mapped at that location";
+ }
+ }
+ have_non_pic_oat_file_ = have_non_pic_oat_file_ || !oat_file->IsPic();
+ const OatFile* ret = oat_file.get();
+ oat_files_.insert(std::move(oat_file));
+ return ret;
+}
+
+void OatFileManager::UnRegisterAndDeleteOatFile(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ DCHECK(oat_file != nullptr);
+ std::unique_ptr<const OatFile> compare(oat_file);
+ auto it = oat_files_.find(compare);
+ CHECK(it != oat_files_.end());
+ oat_files_.erase(it);
+ compare.release();
+}
+
+const OatFile* OatFileManager::FindOpenedOatFileFromOatLocation(const std::string& oat_location)
+ const {
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ return FindOpenedOatFileFromOatLocationLocked(oat_location);
+}
+
+const OatFile* OatFileManager::FindOpenedOatFileFromOatLocationLocked(
+ const std::string& oat_location) const {
+ for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
+ if (oat_file->GetLocation() == oat_location) {
+ return oat_file.get();
+ }
+ }
+ return nullptr;
+}
+
+const OatFile* OatFileManager::GetBootOatFile() const {
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ if (image_space == nullptr) {
+ return nullptr;
+ }
+ return image_space->GetOatFile();
+}
+
+const OatFile* OatFileManager::GetPrimaryOatFile() const {
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ const OatFile* boot_oat_file = GetBootOatFile();
+ if (boot_oat_file != nullptr) {
+ for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
+ if (oat_file.get() != boot_oat_file) {
+ return oat_file.get();
+ }
+ }
+ }
+ return nullptr;
+}
+
+OatFileManager::~OatFileManager() {
+ // Explicitly clear oat_files_ since the OatFile destructor calls back into OatFileManager for
+ // UnRegisterOatFileLocation.
+ oat_files_.clear();
+}
+
+const OatFile* OatFileManager::RegisterImageOatFile(gc::space::ImageSpace* space) {
+ return RegisterOatFile(space->ReleaseOatFile());
+}
+
+class DexFileAndClassPair : ValueObject {
+ public:
+ DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
+ : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
+ dex_file_(dex_file),
+ current_class_index_(current_class_index),
+ from_loaded_oat_(from_loaded_oat) {}
+
+ DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
+
+ DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) = default;
+
+ const char* GetCachedDescriptor() const {
+ return cached_descriptor_;
+ }
+
+ bool operator<(const DexFileAndClassPair& rhs) const {
+ const int cmp = strcmp(cached_descriptor_, rhs.cached_descriptor_);
+ if (cmp != 0) {
+ // Note that the order must be reversed. We want to iterate over the classes in dex files.
+ // They are sorted lexicographically. Thus, the priority-queue must be a min-queue.
+ return cmp > 0;
+ }
+ return dex_file_ < rhs.dex_file_;
+ }
+
+ bool DexFileHasMoreClasses() const {
+ return current_class_index_ + 1 < dex_file_->NumClassDefs();
+ }
+
+ void Next() {
+ ++current_class_index_;
+ cached_descriptor_ = GetClassDescriptor(dex_file_.get(), current_class_index_);
+ }
+
+ size_t GetCurrentClassIndex() const {
+ return current_class_index_;
+ }
+
+ bool FromLoadedOat() const {
+ return from_loaded_oat_;
+ }
+
+ const DexFile* GetDexFile() const {
+ return dex_file_.get();
+ }
+
+ private:
+ static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
+ DCHECK(IsUint<16>(index));
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
+ return dex_file->StringByTypeIdx(class_def.class_idx_);
+ }
+
+ const char* cached_descriptor_;
+ std::shared_ptr<const DexFile> dex_file_;
+ size_t current_class_index_;
+ bool from_loaded_oat_; // We only need to compare mismatches between what we load now
+ // and what was loaded before. Any old duplicates must have been
+ // OK, and any new "internal" duplicates are as well (they must
+ // be from multidex, which resolves correctly).
+};
+
+static void AddDexFilesFromOat(const OatFile* oat_file,
+ bool already_loaded,
+ /*out*/std::priority_queue<DexFileAndClassPair>* heap) {
+ for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ std::string error;
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
+ if (dex_file == nullptr) {
+ LOG(WARNING) << "Could not create dex file from oat file: " << error;
+ } else if (dex_file->NumClassDefs() > 0U) {
+ heap->emplace(dex_file.release(), /*current_class_index*/0U, already_loaded);
+ }
+ }
+}
+
+static void AddNext(/*inout*/DexFileAndClassPair* original,
+ /*inout*/std::priority_queue<DexFileAndClassPair>* heap) {
+ if (original->DexFileHasMoreClasses()) {
+ original->Next();
+ heap->push(std::move(*original));
+ }
+}
+
+// Check for class-def collisions in dex files.
+//
+// This works by maintaining a heap with one class from each dex file, sorted by the class
+// descriptor. Then a dex-file/class pair is continually removed from the heap and compared
+// against the following top element. If the descriptor is the same, it is now checked whether
+// the two elements agree on whether their dex file was from an already-loaded oat-file or the
+// new oat file. Any disagreement indicates a collision.
+bool OatFileManager::HasCollisions(const OatFile* oat_file,
+ /*out*/std::string* error_msg) const {
+ DCHECK(oat_file != nullptr);
+ DCHECK(error_msg != nullptr);
+ if (!kDuplicateClassesCheck) {
+ return false;
+ }
+
+ // Dex files are registered late - once a class is actually being loaded. We have to compare
+ // against the open oat files. Take the oat_file_manager_lock_ that protects oat_files_ accesses.
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+
+ std::priority_queue<DexFileAndClassPair> queue;
+
+ // Add dex files from already loaded oat files, but skip boot.
+ const OatFile* boot_oat = GetBootOatFile();
+ // The same OatFile can be loaded multiple times at different addresses. In this case, we don't
+ // need to check both against each other since they would have resolved the same way at compile
+ // time.
+ std::unordered_set<std::string> unique_locations;
+ for (const std::unique_ptr<const OatFile>& loaded_oat_file : oat_files_) {
+ DCHECK_NE(loaded_oat_file.get(), oat_file);
+ const std::string& location = loaded_oat_file->GetLocation();
+ if (loaded_oat_file.get() != boot_oat &&
+ location != oat_file->GetLocation() &&
+ unique_locations.find(location) == unique_locations.end()) {
+ unique_locations.insert(location);
+ AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue);
+ }
+ }
+
+ if (queue.empty()) {
+ // No other oat files, return early.
+ return false;
+ }
+
+ // Add dex files from the oat file to check.
+ AddDexFilesFromOat(oat_file, /*already_loaded*/false, &queue);
+
+ // Now drain the queue.
+ while (!queue.empty()) {
+ // Modifying the top element is only safe if we pop right after.
+ DexFileAndClassPair compare_pop(queue.top());
+ queue.pop();
+
+ // Compare against the following elements.
+ while (!queue.empty()) {
+ DexFileAndClassPair top(queue.top());
+
+ if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
+ // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
+ if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
+ *error_msg =
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ compare_pop.GetCachedDescriptor(),
+ compare_pop.GetDexFile()->GetLocation().c_str(),
+ top.GetDexFile()->GetLocation().c_str());
+ return true;
+ }
+ queue.pop();
+ AddNext(&top, &queue);
+ } else {
+ // Something else. Done here.
+ break;
+ }
+ }
+ AddNext(&compare_pop, &queue);
+ }
+
+ return false;
+}
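The reversed operator< in DexFileAndClassPair turns std::priority_queue, a max-heap, into a min-heap on descriptors, so the drain above visits class names in lexicographic order across all dex files, like a k-way merge. A reduced sketch of the same detection on plain strings (the per-dex-file cursor advancement done by AddNext is elided):

    #include <queue>
    #include <string>

    struct Entry {
      std::string descriptor;
      bool from_loaded_oat;
      // Reversed so the max-heap pops the lexicographically smallest first.
      bool operator<(const Entry& rhs) const {
        return descriptor > rhs.descriptor;
      }
    };

    // True if one descriptor appears in both an already-loaded and a new oat file.
    bool HasCrossOatDuplicate(std::priority_queue<Entry> queue) {
      while (!queue.empty()) {
        Entry top = queue.top();
        queue.pop();
        if (!queue.empty() && queue.top().descriptor == top.descriptor &&
            queue.top().from_loaded_oat != top.from_loaded_oat) {
          return true;
        }
      }
      return false;
    }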
+
+std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
+ const char* dex_location,
+ const char* oat_location,
+ const OatFile** out_oat_file,
+ std::vector<std::string>* error_msgs) {
+ CHECK(dex_location != nullptr);
+ CHECK(error_msgs != nullptr);
+
+ // Verify we aren't holding the mutator lock, which could starve GC if we
+ // have to generate or relocate an oat file.
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current());
+
+ OatFileAssistant oat_file_assistant(dex_location,
+ oat_location,
+ kRuntimeISA,
+ !Runtime::Current()->IsAotCompiler());
+
+ // Lock the target oat location to avoid races generating and loading the
+ // oat file.
+ std::string error_msg;
+ if (!oat_file_assistant.Lock(/*out*/&error_msg)) {
+ // Don't worry too much if this fails. If it does fail, it's unlikely we
+ // can generate an oat file anyway.
+ VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
+ }
+
+ const OatFile* source_oat_file = nullptr;
+
+ // Update the oat file on disk if we can. This may fail, but that's okay.
+ // Best effort is all that matters here.
+ if (!oat_file_assistant.MakeUpToDate(/*out*/&error_msg)) {
+ LOG(WARNING) << error_msg;
+ }
+
+ // Get the oat file on disk.
+ std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
+ if (oat_file != nullptr) {
+ // Take the file only if it has no collisions, or we must take it because of preopting.
+ bool accept_oat_file = !HasCollisions(oat_file.get(), /*out*/ &error_msg);
+ if (!accept_oat_file) {
+ // Failed the collision check. Print warning.
+ if (Runtime::Current()->IsDexFileFallbackEnabled()) {
+ LOG(WARNING) << "Found duplicate classes, falling back to interpreter mode for "
+ << dex_location;
+ } else {
+ LOG(WARNING) << "Found duplicate classes, dex-file-fallback disabled, will be failing to "
+ " load classes for " << dex_location;
+ }
+ LOG(WARNING) << error_msg;
+
+ // However, if the app was part of /system and preopted, there is no original dex file
+ // available. In that case grudgingly accept the oat file.
+ if (!DexFile::MaybeDex(dex_location)) {
+ accept_oat_file = true;
+ LOG(WARNING) << "Dex location " << dex_location << " does not seem to include dex file. "
+ << "Allow oat file use. This is potentially dangerous.";
+ }
+ }
+
+ if (accept_oat_file) {
+ VLOG(class_linker) << "Registering " << oat_file->GetLocation();
+ source_oat_file = RegisterOatFile(std::move(oat_file));
+ *out_oat_file = source_oat_file;
+ }
+ }
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+
+ // Load the dex files from the oat file.
+ if (source_oat_file != nullptr) {
+ dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
+ if (dex_files.empty()) {
+ error_msgs->push_back("Failed to open dex files from " + source_oat_file->GetLocation());
+ }
+ }
+
+ // Fall back to running out of the original dex file if we couldn't load any
+ // dex_files from the oat file.
+ if (dex_files.empty()) {
+ if (oat_file_assistant.HasOriginalDexFiles()) {
+ if (Runtime::Current()->IsDexFileFallbackEnabled()) {
+ if (!DexFile::Open(dex_location, dex_location, /*out*/ &error_msg, &dex_files)) {
+ LOG(WARNING) << error_msg;
+ error_msgs->push_back("Failed to open dex files from " + std::string(dex_location));
+ }
+ } else {
+ error_msgs->push_back("Fallback mode disabled, skipping dex files.");
+ }
+ } else {
+ error_msgs->push_back("No original dex files found for dex location "
+ + std::string(dex_location));
+ }
+ }
+ return dex_files;
+}
+
+bool OatFileManager::RegisterOatFileLocation(const std::string& oat_location) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_count_lock_);
+ auto it = oat_file_count_.find(oat_location);
+ if (it != oat_file_count_.end()) {
+ ++it->second;
+ return false;
+ }
+ oat_file_count_.insert(std::pair<std::string, size_t>(oat_location, 1u));
+ return true;
+}
+
+void OatFileManager::UnRegisterOatFileLocation(const std::string& oat_location) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_count_lock_);
+ auto it = oat_file_count_.find(oat_location);
+ if (it != oat_file_count_.end()) {
+ --it->second;
+ if (it->second == 0) {
+ oat_file_count_.erase(it);
+ }
+ }
+}
+
+} // namespace art
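RegisterOatFileLocation and UnRegisterOatFileLocation amount to a named reference count: the first registration of a location returns true (the caller reserved it), later registrations return false but still bump the count, and the entry disappears once the count drops back to zero. A hypothetical single-threaded equivalent, without the oat_file_count_lock_ the real code takes:

    #include <string>
    #include <unordered_map>

    class LocationCounter {
     public:
      // Returns true only for the first registration of a location.
      bool Register(const std::string& location) {
        return ++counts_[location] == 1u;
      }
      void UnRegister(const std::string& location) {
        auto it = counts_.find(location);
        if (it != counts_.end() && --it->second == 0u) {
          counts_.erase(it);
        }
      }
     private:
      std::unordered_map<std::string, size_t> counts_;
    };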
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
new file mode 100644
index 0000000000..af7efb4262
--- /dev/null
+++ b/runtime/oat_file_manager.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_MANAGER_H_
+#define ART_RUNTIME_OAT_FILE_MANAGER_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+
+namespace art {
+
+namespace gc {
+namespace space {
+class ImageSpace;
+} // namespace space
+} // namespace gc
+
+class DexFile;
+class OatFile;
+
+// Class for dealing with oat file management.
+//
+// This class knows about all the loaded oat files and provides utility functions. The oat file
+// pointers returned from functions are always valid.
+class OatFileManager {
+ public:
+ OatFileManager() : have_non_pic_oat_file_(false) {}
+ ~OatFileManager();
+
+ // Adds an oat file to the internal accounting; aborts (via CHECK) if an oat file with the
+ // same base address already exists. Returns the raw pointer owned by oat_file.
+ const OatFile* RegisterOatFile(std::unique_ptr<const OatFile> oat_file)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ void UnRegisterAndDeleteOatFile(const OatFile* oat_file)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Find the first opened oat file with the same location, returns null if there are none.
+ const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) const
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Attempts to reserve a location; returns false if it is already reserved or already in use by
+ // an oat file.
+ bool RegisterOatFileLocation(const std::string& oat_location)
+ REQUIRES(!Locks::oat_file_count_lock_);
+
+ // Unreserves an oat file location; should only be used for error cases, since RegisterOatFile
+ // will remove the reserved location.
+ void UnRegisterOatFileLocation(const std::string& oat_location)
+ REQUIRES(!Locks::oat_file_count_lock_);
+
+ // Returns true if we have a non-PIC oat file.
+ bool HaveNonPicOatFile() const {
+ return have_non_pic_oat_file_;
+ }
+
+ // Returns the boot image oat file.
+ const OatFile* GetBootOatFile() const;
+
+ // Returns the first non-image oat file in the class path.
+ const OatFile* GetPrimaryOatFile() const REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Returns the oat file for an image and registers it. Takes ownership of the image space's
+ // underlying oat file.
+ const OatFile* RegisterImageOatFile(gc::space::ImageSpace* space)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Finds or creates the oat file holding dex_location. Then loads and returns
+ // all corresponding dex files (there may be more than one dex file loaded
+ // in the case of multidex).
+ // This may return the original, unquickened dex files if the oat file could
+ // not be generated.
+ //
+ // Returns an empty vector if the dex files could not be loaded. In this
+ // case, there will be at least one error message returned describing why no
+ // dex files could be loaded. The 'error_msgs' argument must not be
+ // null, regardless of whether there is an error or not.
+ //
+ // This method should not be called with the mutator_lock_ held, because it
+ // could end up starving GC if we need to generate or relocate any oat
+ // files.
+ std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
+ const char* dex_location,
+ const char* oat_location,
+ /*out*/ const OatFile** out_oat_file,
+ /*out*/ std::vector<std::string>* error_msgs)
+ REQUIRES(!Locks::oat_file_manager_lock_, !Locks::mutator_lock_);
+
+ private:
+ // Check for duplicate class definitions of the given oat file against all open oat files.
+ // Return true if there are any class definition collisions in the oat_file.
+ bool HasCollisions(const OatFile* oat_file, /*out*/std::string* error_msg) const
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ const OatFile* FindOpenedOatFileFromOatLocationLocked(const std::string& oat_location) const
+ REQUIRES(Locks::oat_file_manager_lock_);
+
+ std::set<std::unique_ptr<const OatFile>> oat_files_ GUARDED_BY(Locks::oat_file_manager_lock_);
+ std::unordered_map<std::string, size_t> oat_file_count_ GUARDED_BY(Locks::oat_file_count_lock_);
+ bool have_non_pic_oat_file_;
+ DISALLOW_COPY_AND_ASSIGN(OatFileManager);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_MANAGER_H_
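Callers are expected to go through the manager rather than the ClassLinker, as DexFile_openDexFileNative and the updated RaceGenerateTask do above. A hedged sketch of the call pattern, assuming a live Runtime (the paths are placeholders):

    const OatFile* oat_file = nullptr;
    std::vector<std::string> error_msgs;
    std::vector<std::unique_ptr<const DexFile>> dex_files =
        Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
            "/data/app/example.apk",
            "/data/dalvik-cache/example.oat",
            /*out*/ &oat_file,
            /*out*/ &error_msgs);
    if (dex_files.empty()) {
      for (const std::string& msg : error_msgs) {
        LOG(WARNING) << msg;
      }
    }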
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 50e2053c73..ae16c7f373 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -269,10 +269,10 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xfingerprint:_")
.WithType<std::string>()
.IntoKey(M::Fingerprint)
- .Define({"-Xexperimental-lambdas", "-Xnoexperimental-lambdas"})
- .WithType<bool>()
- .WithValues({true, false})
- .IntoKey(M::ExperimentalLambdas)
+ .Define("-Xexperimental:_")
+ .WithType<ExperimentalFlags>()
+ .AppendValues()
+ .IntoKey(M::Experimental)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
@@ -557,7 +557,14 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
args.Set(M::HeapGrowthLimit, args.GetOrDefault(M::MemoryMaximumSize));
}
- if (args.GetOrDefault(M::ExperimentalLambdas)) {
+ if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kDefaultMethods) {
+ LOG(WARNING) << "Default method support has been enabled. The verifier will be less strict "
+ << "in some cases. All existing invoke opcodes have an unstable updated "
+ << "specification and are nearly guaranteed to change over time. Do not attempt "
+ << "to write shipping code against the invoke opcodes with this flag.";
+ }
+
+ if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kLambdas) {
LOG(WARNING) << "Experimental lambdas have been enabled. All lambda opcodes have "
<< "an unstable specification and are nearly guaranteed to change over time. "
<< "Do not attempt to write shipping code against these opcodes.";
@@ -682,8 +689,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
UsageMessage(stream, " -Xno-dex-file-fallback "
"(Don't fall back to dex files without oat files)\n");
- UsageMessage(stream, " -X[no]experimental-lambdas\n"
- " (Enable new experimental dalvik opcodes, off by default)\n");
+ UsageMessage(stream, " -Xexperimental:{lambdas,default-methods} "
+ "(Enable new experimental dalvik opcodes and semantics, off by default)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 64c2249925..837662d879 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -74,6 +74,7 @@ enum InlineMethodOpcode : uint16_t {
kIntrinsicUnsafeGet,
kIntrinsicUnsafePut,
kIntrinsicSystemArrayCopyCharArray,
+ kIntrinsicSystemArrayCopy,
kInlineOpNop,
kInlineOpReturnArg,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 63f43cf3b2..7ba19ab8d6 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,6 +17,7 @@
#include "quick_exception_handler.h"
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
@@ -26,6 +27,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
+#include "stack_map.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -99,7 +101,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
- method->ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
+ GetCurrentCode().ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
return false; // End stack walk.
} else if (UNLIKELY(GetThread()->HasDebuggerShadowFrames())) {
@@ -159,7 +161,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
// If the handler is in optimized code, we need to set the catch environment.
if (*handler_quick_frame_ != nullptr &&
handler_method_ != nullptr &&
- handler_method_->IsOptimized(sizeof(void*))) {
+ ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*))) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
}
@@ -200,14 +202,14 @@ static VRegKind ToVRegKind(DexRegisterLocation::Kind kind) {
void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
DCHECK(!is_deoptimization_);
DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
- DCHECK(handler_method_ != nullptr && handler_method_->IsOptimized(sizeof(void*)));
+ DCHECK(handler_method_ != nullptr && ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*)));
if (kDebugExceptionDelivery) {
self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
}
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
- CodeInfo code_info = handler_method_->GetOptimizedCodeInfo();
+ CodeInfo code_info = ArtCode(handler_quick_frame_).GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
// Find stack map of the throwing instruction.
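The mechanical pattern in this file recurs across the rest of the patch: queries about compiled code (IsOptimized, GetOptimizedCodeInfo, ToNativeQuickPc) move off ArtMethod and onto an ArtCode wrapper constructed from the current quick frame. A hedged sketch of the call shape, using only names that appear in this diff (ArtCode itself is defined in art_code.h, which is not shown here):

    // Sketch only: how a catch handler's PC is now resolved.
    ArtCode code(handler_quick_frame_);      // wrap the handler frame's ArtMethod**
    if (code.IsOptimized(sizeof(void*))) {   // was: handler_method_->IsOptimized(...)
      CodeInfo code_info = code.GetOptimizedCodeInfo();
      StackMapEncoding encoding = code_info.ExtractEncoding();
      // ... stack map lookup proceeds exactly as before ...
    }
    uintptr_t pc = code.ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true);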
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9fb21a8425..cd09bee4e6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -64,6 +64,7 @@
#include "debugger.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "experimental_flags.h"
#include "fault_handler.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -115,6 +116,7 @@
#include "native/sun_misc_Unsafe.h"
#include "native_bridge_art_interface.h"
#include "oat_file.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "parsed_options.h"
#include "profiler.h"
@@ -198,7 +200,7 @@ Runtime::Runtime()
no_sig_chain_(false),
is_native_bridge_loaded_(false),
zygote_max_failed_boots_(0),
- experimental_lambdas_(false) {
+ experimental_flags_(ExperimentalFlags::kNone) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
}
@@ -284,6 +286,7 @@ Runtime::~Runtime() {
delete heap_;
delete intern_table_;
delete java_vm_;
+ delete oat_file_manager_;
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
@@ -698,7 +701,7 @@ bool Runtime::IsShuttingDown(Thread* self) {
}
bool Runtime::IsDebuggable() const {
- const OatFile* oat_file = GetClassLinker()->GetPrimaryOatFile();
+ const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
return oat_file != nullptr && oat_file->IsDebuggable();
}
@@ -756,9 +759,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
if (elf_file.get() == nullptr) {
return false;
}
- std::unique_ptr<OatFile> oat_file(OatFile::OpenWithElfFile(elf_file.release(), oat_location,
- nullptr, &error_msg));
- if (oat_file.get() == nullptr) {
+ std::unique_ptr<const OatFile> oat_file(
+ OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
+ if (oat_file == nullptr) {
LOG(INFO) << "Unable to use '" << oat_filename << "' because " << error_msg;
return false;
}
@@ -775,7 +778,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
dex_files->push_back(std::move(dex_file));
}
}
- Runtime::Current()->GetClassLinker()->RegisterOatFile(oat_file.release());
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
return true;
}
@@ -831,6 +834,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
QuasiAtomic::Startup();
+ oat_file_manager_ = new OatFileManager;
+
Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
@@ -880,7 +885,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
}
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
- experimental_lambdas_ = runtime_options.GetOrDefault(Opt::ExperimentalLambdas);
+ experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
ATRACE_BEGIN("CreateHeap");
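Ownership is the subtle part of the oat-file changes here: RegisterOatFile now takes a std::unique_ptr<const OatFile> by move, the manager stores it in the std::set shown at the top of this section, and the manager itself is created in Runtime::Init and deleted in ~Runtime. A hedged sketch of the lifecycle, using only calls visible in this diff:

    // Sketch only; error handling elided as above.
    std::unique_ptr<const OatFile> oat_file(
        OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
    if (oat_file != nullptr) {
      Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
      // oat_file is now null; the manager owns the OatFile until runtime shutdown.
    }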
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6154c34ec5..458f08a316 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
+#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
@@ -82,6 +83,7 @@ class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
+class OatFileManager;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
@@ -531,8 +533,8 @@ class Runtime {
return zygote_max_failed_boots_;
}
- bool AreExperimentalLambdasEnabled() const {
- return experimental_lambdas_;
+ bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
+ return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
lambda::BoxTable* GetLambdaBoxTable() const {
@@ -573,6 +575,11 @@ class Runtime {
// Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
+ OatFileManager& GetOatFileManager() const {
+ DCHECK(oat_file_manager_ != nullptr);
+ return *oat_file_manager_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -763,13 +770,16 @@ class Runtime {
// eventually publish them as public-usable opcodes, but they aren't ready yet.
//
// Experimental opcodes should not be used by other production code.
- bool experimental_lambdas_;
+ ExperimentalFlags experimental_flags_;
MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
// Contains the build fingerprint, if given as a parameter.
std::string fingerprint_;
+ // Oat file manager, keeps track of what oat files are open.
+ OatFileManager* oat_file_manager_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
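AreExperimentalFlagsEnabled deliberately takes the flags to test rather than exposing the raw mask, so call sites stay one-liners. A hedged usage sketch, mirroring how method_verifier.cc below queries the default-methods bit:

    // Sketch only.
    if (Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
      // Relax the interface-method checks.
    }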
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index f0b3c4e4cb..44a13c9020 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@ struct Backtrace {
public:
explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
+ DumpNativeStack(os, GetTid(), "\t", nullptr, nullptr, raw_context_);
}
private:
// Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d88e84b602..7b5bc1ad9a 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -114,7 +114,7 @@ RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (bool, ExperimentalLambdas, false) // -X[no]experimental-lambdas
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{lambdas, default-methods}
</gr-replace>
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 1d21a6494a..d8d916c7ee 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,6 +17,7 @@
#include "stack.h"
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -110,9 +111,9 @@ StackVisitor::StackVisitor(Thread* thread,
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- ArtMethod* outer_method = GetOuterMethod();
- uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ ArtCode outer_code = GetCurrentCode();
+ uint32_t native_pc_offset = outer_code.NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = outer_code.GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -142,7 +143,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
} else {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ return GetCurrentCode().ToDexPc(cur_quick_frame_pc_, abort_on_failure);
}
} else {
return 0;
@@ -160,7 +161,8 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
HandleScope* hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
+ reinterpret_cast<char*>(cur_quick_frame_) +
+ GetCurrentCode().GetHandleScopeOffset().SizeValue());
return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
@@ -190,7 +192,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
size_t StackVisitor::GetNativePcOffset() const {
DCHECK(!IsShadowFrame());
- return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
+ return GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
}
bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
@@ -199,10 +201,10 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
}
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return true; // TODO: Implement.
}
- const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -211,9 +213,7 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
const uint8_t* reg_bitmap = nullptr;
if (num_regs > 0) {
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+ uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
}
@@ -252,7 +252,7 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -266,10 +266,9 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -289,19 +288,16 @@ bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind ki
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- ArtMethod* outer_method = GetOuterMethod();
- const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
DCHECK_EQ(m, GetMethod());
const DexFile::CodeItem* code_item = m->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, code_item->registers_size_);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
@@ -406,7 +402,7 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -420,10 +416,9 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -482,7 +477,7 @@ bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -497,10 +492,8 @@ bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t ne
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -591,7 +584,7 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -605,10 +598,9 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
bool StackVisitor::SetVRegPairFromQuickCode(
ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -725,14 +717,14 @@ void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -867,9 +859,9 @@ void StackVisitor::SanityCheckFrame() const {
}
}
if (cur_quick_frame_ != nullptr) {
- method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
+ GetCurrentCode().AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
// Frame sanity.
- size_t frame_size = method->GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
@@ -880,7 +872,7 @@ void StackVisitor::SanityCheckFrame() const {
// const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
const size_t kMaxExpectedFrameSize = 2 * KB;
CHECK_LE(frame_size, kMaxExpectedFrameSize);
- size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
+ size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
CHECK_LT(return_pc_offset, frame_size);
}
}
@@ -891,6 +883,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
CHECK_EQ(cur_depth_, 0U);
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
+ size_t inlined_frames_count = 0;
for (const ManagedStack* current_fragment = thread_->GetManagedStack();
current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
@@ -906,10 +899,10 @@ void StackVisitor::WalkStack(bool include_transitions) {
SanityCheckFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
- && method->IsOptimized(sizeof(void*))) {
- CodeInfo code_info = method->GetOptimizedCodeInfo();
+ && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
@@ -922,6 +915,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
return;
}
cur_depth_++;
+ inlined_frames_count++;
}
}
}
@@ -934,9 +928,9 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (context_ != nullptr) {
context_->FillCalleeSaves(*this);
}
- size_t frame_size = method->GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
// Compute PC for next stack frame from return PC.
- size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
+ size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
if (UNLIKELY(exit_stubs_installed)) {
@@ -952,27 +946,32 @@ void StackVisitor::WalkStack(bool include_transitions) {
ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
- } else if (instrumentation_frame.method_ != GetMethod()) {
- LOG(FATAL) << "Expected: " << PrettyMethod(instrumentation_frame.method_)
- << " Found: " << PrettyMethod(GetMethod());
+ } else {
+ CHECK_EQ(instrumentation_frame.method_, GetMethod())
+ << "Expected: " << PrettyMethod(instrumentation_frame.method_)
+ << " Found: " << PrettyMethod(GetMethod());
}
if (num_frames_ != 0) {
// Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
// recursion.
- CHECK(instrumentation_frame.frame_id_ == GetFrameId())
- << "Expected: " << instrumentation_frame.frame_id_
- << " Found: " << GetFrameId();
+ size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
+ thread_,
+ cur_depth_,
+ inlined_frames_count);
+ CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
}
return_pc = instrumentation_frame.return_pc_;
}
}
+ ArtCode code = GetCurrentCode();
+
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
if (kDebugStackWalk) {
LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
- << " optimized=" << method->IsOptimized(sizeof(void*))
+ << " optimized=" << code.IsOptimized(sizeof(void*))
<< " native=" << method->IsNative()
<< " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
<< "," << method->GetEntryPointFromJni()
@@ -1051,4 +1050,87 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
}
}
+void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+
+ // If there's an error during enter, we won't have locked the monitor. So check that no
+ // exception is pending.
+ if (self->IsExceptionPending()) {
+ return;
+ }
+
+ if (monitors_ == nullptr) {
+ monitors_.reset(new std::vector<mirror::Object*>());
+ }
+ monitors_->push_back(obj);
+}
+
+void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ bool found_object = false;
+ if (monitors_ != nullptr) {
+ // We need to remove one pointer to obj, as duplicate entries are used to count recursive locks.
+ // We arbitrarily choose the first one.
+ auto it = std::find(monitors_->begin(), monitors_->end(), obj);
+ if (it != monitors_->end()) {
+ monitors_->erase(it);
+ found_object = true;
+ }
+ }
+ if (!found_object) {
+ // The object wasn't found. Time for an IllegalMonitorStateException.
+ // The order here isn't fully clear. Assume that any other pending exception is swallowed.
+ // TODO: Maybe make already pending exception a suppressed exception.
+ self->ClearException();
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not lock monitor on object of type '%s' before unlocking",
+ PrettyTypeOf(const_cast<mirror::Object*>(obj)).c_str());
+ }
+}
+
+// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
+// that the object was locked.
+void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ obj->MonitorExit(self);
+}
+
+bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) {
+ DCHECK(self != nullptr);
+ if (monitors_ != nullptr) {
+ if (!monitors_->empty()) {
+ // There may be an exception pending, if the method is terminating abruptly. Clear it.
+ // TODO: Should we add this as a suppressed exception?
+ self->ClearException();
+
+ // OK, there are monitors that are still locked. To enforce structured locking (and avoid
+ // deadlocks) we unlock all of them before we raise the IllegalMonitorStateException.
+ for (mirror::Object* obj : *monitors_) {
+ MonitorExitHelper(self, obj);
+ // If this raised an exception, ignore it. TODO: Should we add it as a suppressed
+ // exception?
+ if (self->IsExceptionPending()) {
+ self->ClearException();
+ }
+ }
+ // Raise an exception, giving the first object as the sample.
+ mirror::Object* first = (*monitors_)[0];
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not unlock monitor on object of type '%s'",
+ PrettyTypeOf(first).c_str());
+
+ // To make sure this path is not triggered again, clean out the monitors.
+ monitors_->clear();
+
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace art
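The duplicate-entry scheme above is worth a second look: a recursive lock pushes the same object pointer again, each unlock erases one occurrence, and whatever remains at method exit is an unbalanced lock. A standalone model of that bookkeeping using only the standard library (no ART types):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<const void*> monitors;  // models LockCountData::monitors_
      int obj = 0;                        // stand-in for a mirror::Object
      monitors.push_back(&obj);           // monitor-enter
      monitors.push_back(&obj);           // recursive enter: duplicate entry
      assert(std::count(monitors.begin(), monitors.end(), &obj) == 2);
      // monitor-exit erases one arbitrary occurrence, as RemoveMonitorInternal does.
      monitors.erase(std::find(monitors.begin(), monitors.end(), &obj));
      monitors.erase(std::find(monitors.begin(), monitors.end(), &obj));
      // An empty vector at method exit is the balanced case; anything left over
      // is the CheckAllMonitorsReleasedInternal failure path above.
      assert(monitors.empty());
    }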
diff --git a/runtime/stack.h b/runtime/stack.h
index 31acf0eb64..3e0566d2f0 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,7 +20,10 @@
#include <stdint.h>
#include <string>
+#include "art_code.h"
#include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
@@ -66,6 +69,72 @@ class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
+// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
+// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
+// thread roots).
+// Note: implementation is split so that the call sites may be optimized to no-ops in case no
+// lock counting is necessary. The actual implementation is in the cc file to avoid
+// dependencies.
+class LockCountData {
+ public:
+ // Add the given object to the list of monitors, that is, objects that have been locked. This
+ // will not throw (but will be skipped if an exception is pending on entry).
+ template <bool kLockCounting>
+ void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ AddMonitorInternal(self, obj);
+ }
+
+ // Try to remove the given object from the monitor list, indicating an unlock operation.
+ // This will throw an IllegalMonitorStateException (clearing any already pending exception)
+ // if no lock was recorded for the object.
+ template <bool kLockCounting>
+ void RemoveMonitorOrThrow(Thread* self,
+ const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ RemoveMonitorInternal(self, obj);
+ }
+
+ // Check whether all acquired monitors have been released. This will potentially throw an
+ // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
+ // check shows that everything is OK with respect to lock counting, false otherwise.
+ template <bool kLockCounting>
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return true;
+ }
+ return CheckAllMonitorsReleasedInternal(self);
+ }
+
+ template <typename T, typename... Args>
+ void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (monitors_ != nullptr) {
+ // Visitors may change the Object*. Be careful with the foreach loop.
+ for (mirror::Object*& obj : *monitors_) {
+ visitor(/* inout */ &obj, std::forward<Args>(args)...);
+ }
+ }
+ }
+
+ private:
+ // Internal implementations.
+ void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Stores references to the locked-on objects. As noted, this should be visited during thread
+ // marking.
+ std::unique_ptr<std::vector<mirror::Object*>> monitors_;
+};
+
// ShadowFrame has 2 possible layouts:
// - interpreter - separate VRegs and reference arrays. References are in the reference array.
// - JNI - just VRegs, but where every VReg holds a reference.
@@ -272,6 +341,10 @@ class ShadowFrame {
}
}
+ LockCountData& GetLockCountData() {
+ return lock_count_data_;
+ }
+
static size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
@@ -330,6 +403,7 @@ class ShadowFrame {
ShadowFrame* link_;
ArtMethod* method_;
uint32_t dex_pc_;
+ LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
@@ -644,6 +718,10 @@ class StackVisitor {
return cur_shadow_frame_;
}
+ bool IsCurrentFrameInInterpreter() const {
+ return cur_shadow_frame_ != nullptr;
+ }
+
HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
ArtMethod** sp = GetCurrentQuickFrame();
// Skip ArtMethod*; handle scope comes next;
@@ -657,6 +735,8 @@ class StackVisitor {
static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtCode GetCurrentCode() const { return ArtCode(cur_quick_frame_); }
+
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
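The kLockCounting template parameter explains the header/cc split in LockCountData: when a configuration doesn't count locks, the public methods compile to empty inline bodies and the cc-file internals are never reached. A hedged usage sketch with names from this diff, assuming a Thread* self and a locked mirror::Object* obj in scope:

    // Sketch only. kLockCounting would be a compile-time configuration constant.
    LockCountData& data = shadow_frame->GetLockCountData();
    data.AddMonitor<true>(self, obj);            // monitor-enter bookkeeping
    data.RemoveMonitorOrThrow<true>(self, obj);  // monitor-exit; throws if not locked
    bool ok = data.CheckAllMonitorsReleasedOrThrow<true>(self);  // at method exit
    data.AddMonitor<false>(self, obj);           // compiles to a no-op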
diff --git a/runtime/thread.cc b/runtime/thread.cc
index c317591ab5..8e0c288185 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,6 +32,7 @@
#include <sstream>
#include "arch/context.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
@@ -66,6 +67,7 @@
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "stack.h"
+#include "stack_map.h"
#include "thread_list.h"
#include "thread-inl.h"
#include "utils.h"
@@ -1493,7 +1495,9 @@ void Thread::DumpStack(std::ostream& os) const {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
+ ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
+ ArtCode art_code(method);
+ DumpNativeStack(os, GetTid(), " native: ", method, &art_code);
}
DumpJavaStack(os);
} else {
@@ -2651,7 +2655,7 @@ class ReferenceMapVisitor : public StackVisitor {
} else {
// Java method.
// Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(gc_map != nullptr) << PrettyMethod(m);
verifier::DexPcToReferenceMap dex_gc_map(gc_map);
uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -2671,6 +2675,8 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
}
+ // Visit the monitors held in the lock count data; they are roots needed for structured locking checks.
+ shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
}
private:
@@ -2696,13 +2702,11 @@ class ReferenceMapVisitor : public StackVisitor {
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
@@ -2732,7 +2736,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
} else {
- const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -2740,14 +2744,12 @@ class ReferenceMapVisitor : public StackVisitor {
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = map.RegWidth() * 8;
if (num_regs > 0) {
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+ uintptr_t native_pc_offset =
+ GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
- const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
// For all dex registers in the bitmap
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
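The VisitMonitors call a few hunks up passes each stored mirror::Object* slot to the visitor by pointer, so a moving GC can rewrite the slot in place; the trailing arguments (-1 and this) are forwarded untouched. A standalone model of that forwarding, with a plain int standing in for the object type:

    #include <iostream>
    #include <utility>
    #include <vector>

    // Models LockCountData::VisitMonitors: the visitor receives an in/out slot.
    template <typename T, typename... Args>
    void VisitAll(std::vector<int*>& monitors, T visitor, Args&&... args) {
      for (int*& obj : monitors) {
        visitor(/* inout */ &obj, std::forward<Args>(args)...);
      }
    }

    int main() {
      int old_loc = 1, new_loc = 1;
      std::vector<int*> monitors{&old_loc};
      // A "moving GC" visitor: redirect every slot to the object's new location.
      VisitAll(monitors, [&](int** slot, const char* tag) {
        std::cout << "visiting (" << tag << ")\n";
        *slot = &new_loc;
      }, "mark");
      std::cout << (monitors[0] == &new_loc) << "\n";  // prints 1
    }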
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 27daceaaf2..40cd6d340c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,6 +25,7 @@
#include <unistd.h>
#include <memory>
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
@@ -1092,7 +1093,7 @@ static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream
#endif
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
- ArtMethod* current_method, void* ucontext_ptr) {
+ ArtMethod* current_method, ArtCode* current_code, void* ucontext_ptr) {
#if __linux__
// b/18119146
if (RUNNING_ON_MEMORY_TOOL != 0) {
@@ -1148,8 +1149,8 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
try_addr2line = true;
} else if (
current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_method->PcIsWithinQuickCode(it->pc)) {
- const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
+ current_code->PcIsWithinQuickCode(it->pc)) {
+ const void* start_of_code = current_code->GetQuickOatEntryPoint(sizeof(void*));
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
} else {
@@ -1163,7 +1164,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
}
}
#else
- UNUSED(os, tid, prefix, current_method, ucontext_ptr);
+ UNUSED(os, tid, prefix, current_method, current_code, ucontext_ptr);
#endif
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 19cc462762..b67f273f15 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -33,6 +33,7 @@
namespace art {
+class ArtCode;
class ArtField;
class ArtMethod;
class DexFile;
@@ -221,7 +222,7 @@ void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+ ArtMethod* current_method = nullptr, ArtCode* current_code = nullptr, void* ucontext = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
@@ -306,6 +307,14 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) {
void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+static inline const void* EntryPointToCodePointer(const void* entry_point) {
+ uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
+ // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
+ // least 2 byte aligned.
+ code &= ~0x1;
+ return reinterpret_cast<const void*>(code);
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
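The mask in the new EntryPointToCodePointer exists because Thumb2 entry points carry the interworking bit: bit 0 of the address is set to 1 so the CPU switches to Thumb state, while the instructions themselves start at the even address. A standalone check of the masking:

    #include <cassert>
    #include <cstdint>

    const void* EntryPointToCodePointer(const void* entry_point) {
      uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
      code &= ~0x1;  // clear the Thumb interworking bit
      return reinterpret_cast<const void*>(code);
    }

    int main() {
      // A Thumb entry point: code at 0x1000, bit 0 set to request Thumb state.
      const void* thumb = reinterpret_cast<const void*>(uintptr_t{0x1001});
      assert(EntryPointToCodePointer(thumb) ==
             reinterpret_cast<const void*>(uintptr_t{0x1000}));
      // Benign elsewhere: an already-aligned entry point is unchanged.
      const void* aligned = reinterpret_cast<const void*>(uintptr_t{0x2000});
      assert(EntryPointToCodePointer(aligned) == aligned);
    }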
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 4f662d5a8f..90e24b9632 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -27,20 +27,25 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
+ const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
methods_offset_(types_offset_ +
- RoundUp(TypesSize(dex_file->NumTypeIds()), MethodsAlignment())),
+ RoundUp(TypesSize(header.type_ids_size_), MethodsAlignment())),
strings_offset_(methods_offset_ +
- RoundUp(MethodsSize(dex_file->NumMethodIds()), StringsAlignment())),
+ RoundUp(MethodsSize(header.method_ids_size_), StringsAlignment())),
fields_offset_(strings_offset_ +
- RoundUp(StringsSize(dex_file->NumStringIds()), FieldsAlignment())),
+ RoundUp(StringsSize(header.string_ids_size_), FieldsAlignment())),
size_(fields_offset_ +
- RoundUp(FieldsSize(dex_file->NumFieldIds()), Alignment())) {
+ RoundUp(FieldsSize(header.field_ids_size_), Alignment())) {
DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+ : DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
+}
+
inline size_t DexCacheArraysLayout::Alignment() const {
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index d50be5ac03..cd84460c3b 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
#define ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
+#include "dex_file.h"
+
namespace art {
/**
@@ -36,6 +38,9 @@ class DexCacheArraysLayout {
size_(0u) {
}
+ // Construct a layout for a particular dex file header.
+ DexCacheArraysLayout(size_t pointer_size, const DexFile::Header& header);
+
// Construct a layout for a particular dex file.
DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
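The value of the new header-based constructor is that all four counts the layout needs (type, method, string, and field IDs) live in the dex header, so the arrays can be sized before any DexFile object exists. A standalone model of the offset arithmetic, with illustrative element sizes (the real TypesSize/MethodsSize/etc. and their alignments are ART-specific):

    #include <cstddef>
    #include <iostream>

    size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

    int main() {
      // Counts as if read from a DexFile::Header; element sizes are assumptions.
      size_t type_ids = 10, method_ids = 50, string_ids = 100, field_ids = 20;
      size_t pointer_size = 8;
      size_t types_offset   = 0u;  // always 0, as in the real layout
      size_t methods_offset = types_offset   + RoundUp(type_ids * 4, pointer_size);
      size_t strings_offset = methods_offset + RoundUp(method_ids * pointer_size, 4);
      size_t fields_offset  = strings_offset + RoundUp(string_ids * 4, pointer_size);
      size_t total          = fields_offset  + RoundUp(field_ids * pointer_size, pointer_size);
      std::cout << methods_offset << " " << strings_offset << " "
                << fields_offset << " " << total << "\n";
    }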
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index eed3e22a72..4051a1cbe6 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -30,6 +30,7 @@
#include "dex_instruction-inl.h"
#include "dex_instruction_utils.h"
#include "dex_instruction_visitor.h"
+#include "experimental_flags.h"
#include "gc/accounting/card_table-inl.h"
#include "indenter.h"
#include "intern_table.h"
@@ -560,6 +561,7 @@ SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
+ Runtime* runtime = Runtime::Current();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
@@ -628,9 +630,13 @@ bool MethodVerifier::Verify() {
}
}
if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
- // Interface methods must be public and abstract.
- if ((method_access_flags_ & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public and abstract";
+ // Interface methods must be public and abstract (if default methods are disabled).
+ bool default_methods_supported =
+ runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
+ uint32_t kRequired = kAccPublic | (default_methods_supported ? 0 : kAccAbstract);
+ if ((method_access_flags_ & kRequired) != kRequired) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public"
+ << (default_methods_supported ? "" : " and abstract");
return false;
}
// In addition to the above, interface methods must not be protected.
@@ -657,10 +663,22 @@ bool MethodVerifier::Verify() {
return false;
}
- // Only the static initializer may have code in an interface.
if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
- // Interfaces may have static initializers for their fields.
- if (!IsConstructor() || !IsStatic()) {
+ // Interfaces may always have static initializers for their fields. If we are running with
+ // default methods enabled we also allow other public, non-final methods to have code.
+ // Otherwise the static initializer is the only kind of method allowed to have code.
+ if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
+ if (IsInstanceConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have a non-static constructor";
+ return false;
+ } else if (method_access_flags_ & kAccFinal) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
+ return false;
+ } else if (!(method_access_flags_ & kAccPublic)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ return false;
+ }
+ } else if (!IsConstructor() || !IsStatic()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
return false;
}
@@ -682,6 +700,7 @@ bool MethodVerifier::Verify() {
<< " regs=" << code_item_->registers_size_;
return false;
}
+
// Allocate and initialize an array to hold instruction data.
insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
// Run through the instructions and see if the width checks out.
@@ -693,8 +712,8 @@ bool MethodVerifier::Verify() {
// Perform code-flow analysis and return.
result = result && VerifyCodeFlow();
// Compute information for compiler.
- if (result && Runtime::Current()->IsCompiler()) {
- result = Runtime::Current()->GetCompilerCallbacks()->MethodVerified(this);
+ if (result && runtime->IsCompiler()) {
+ result = runtime->GetCompilerCallbacks()->MethodVerified(this);
}
return result;
}
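The kRequired computation above compresses "must be public, and must also be abstract unless default methods are on" into one mask test. A standalone check of that flag logic (0x0001 and 0x0400 are the standard kAccPublic/kAccAbstract values, used here as assumptions; the real constants live in ART's modifiers header):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kAccPublic = 0x0001, kAccAbstract = 0x0400;
      for (bool default_methods : {false, true}) {
        uint32_t required = kAccPublic | (default_methods ? 0 : kAccAbstract);
        uint32_t concrete_public = kAccPublic;  // a default-method candidate
        bool passes = (concrete_public & required) == required;
        assert(passes == default_methods);  // rejected without the flag, accepted with it
      }
    }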
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 33c90e3000..02c93cf864 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -391,6 +391,34 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
}
}
+// Check whether there is another register in the search map that is locked the same way as the
+// register in the src map. This establishes an alias.
+static bool FindLockAliasedRegister(
+ uint32_t src,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+ auto it = src_map.find(src);
+ if (it == src_map.end()) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+ uint32_t src_lock_levels = it->second;
+ if (src_lock_levels == 0) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+
+ // Scan the map for the same value.
+ for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+ if (pair.first != src && pair.second == src_lock_levels) {
+ return true;
+ }
+ }
+
+ // Nothing found, no alias.
+ return false;
+}
+
bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
bool changed = false;
DCHECK(incoming_line != nullptr);
@@ -417,9 +445,29 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
size_t depths = reg_to_lock_depths_.count(idx);
size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
if (depths != incoming_depths) {
- if (depths == 0 || incoming_depths == 0) {
- reg_to_lock_depths_.erase(idx);
- } else {
+ // Stack levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases on both the current and the incoming line.
+ //
+ // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // | |
+ // v0 = v1 {v0=1, v1=1} v0 = v2 {v1=1}
+ // | |
+ // {v1=1}
+ // // Dropping v0, as the status can't be merged
+ // // but the lock info ("locked at depth 1" and
+ // // "not locked at all") is available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "mismatched stack depths for register v" << idx
@@ -429,20 +477,51 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
}
break;
}
+ // Aliases were found, so it is safe to drop the lock information for this register.
+ reg_to_lock_depths_.erase(idx);
} else if (depths > 0) {
// Check whether they're actually the same levels.
uint32_t locked_levels = reg_to_lock_depths_.find(idx)->second;
uint32_t incoming_locked_levels = incoming_line->reg_to_lock_depths_.find(idx)->second;
if (locked_levels != incoming_locked_levels) {
- verifier->Fail(VERIFY_ERROR_LOCKING);
- if (kDumpLockFailures) {
- LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
- << std::hex << locked_levels << std::dec << " != "
- << std::hex << incoming_locked_levels << std::dec << " in "
- << PrettyMethod(verifier->GetMethodReference().dex_method_index,
- *verifier->GetMethodReference().dex_file);
+ // Lock levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases on both the current and the incoming line.
+ //
+ // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // lock v2 {v1=1, v2=2}
+ // | |
+ // v0 = v1 {v0=1, v1=1, v2=2} v0 = v2 {v0=2, v1=1, v2=2}
+ // | |
+ // {v1=1, v2=2}
+ // // Dropping v0, as the status can't be
+ // // merged but the lock info ("locked at
+ // // depth 1" and "locked at depth 2") is
+ // // available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
+ // At least one of the lock levels has no alias, so we would lose information.
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
+ << std::hex << locked_levels << std::dec << " != "
+ << std::hex << incoming_locked_levels << std::dec << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
+ break;
}
- break;
+ // Aliases were found, so it is safe to drop the lock information for this register.
+ reg_to_lock_depths_.erase(idx);
}
}
}
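What FindLockAliasedRegister scans for is another register whose lock-depth bitmask is identical to the one being dropped: two registers assigned from the same lock carry the same depth set, so losing one of them loses no information. A standalone model using std::map (the real code's AllocationTrackingSafeMap has the same lookup shape):

    #include <cassert>
    #include <cstdint>
    #include <map>

    // Mirrors FindLockAliasedRegister: true if src is unlocked or aliased.
    bool FindAlias(uint32_t src, const std::map<uint32_t, uint32_t>& src_map,
                   const std::map<uint32_t, uint32_t>& search_map) {
      auto it = src_map.find(src);
      if (it == src_map.end() || it->second == 0) {
        return true;  // "not locked" is trivially aliased
      }
      for (const auto& pair : search_map) {
        if (pair.first != src && pair.second == it->second) {
          return true;  // another register holds the same lock levels
        }
      }
      return false;
    }

    int main() {
      // After "lock v1; v0 = v1": both carry the same depth bit, so dropping v0 is safe.
      std::map<uint32_t, uint32_t> locks{{0, 0x2}, {1, 0x2}};
      assert(FindAlias(0, locks, locks));
      // A locked register with no surviving alias: dropping it must fail verification.
      std::map<uint32_t, uint32_t> lone{{0, 0x2}};
      assert(!FindAlias(0, lone, lone));
    }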
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 285df18c72..f8d321cbec 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -22,11 +22,11 @@ namespace art {
#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
- uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, \
+ uintptr_t native_quick_pc = GetCurrentCode().ToNativeQuickPc(dex_pc, \
/* is_catch_handler */ false, \
abort_if_not_found); \
if (native_quick_pc != UINTPTR_MAX) { \
- CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+ CheckReferences(t, t_size, GetCurrentCode().NativeQuickPcOffset(native_quick_pc)); \
} \
} while (false);
@@ -49,7 +49,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
@@ -65,7 +65,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
// v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
// v5 is removed from the root set because there is a "merge" operation.
@@ -74,7 +74,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
}
CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index 218805543e..d742b1410a 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -27,6 +27,13 @@ public class Main {
*/
public static void main(String[] args) {
System.loadLibrary(args[0]);
+ if (!hasOatFile() || runtimeIsSoftFail() || isInterpreted()) {
+ // Some tests ensure that the verifier was able to guarantee balanced locking by
+ // asserting that the test function is running as compiled code. But skip this now,
+ // as this seems to be a non-compiled code test configuration.
+ disableStackFrameAsserts();
+ }
+
Main m = new Main();
m.recursiveSync(0);
@@ -49,7 +56,7 @@ public class Main {
Object obj1 = new Object();
Object obj2 = new Object();
- m.twoPath(obj1, obj2, 0);
+ TwoPath.twoPath(obj1, obj2, 0);
System.out.println("twoPath ok");
m.triplet(obj1, obj2, 0);
@@ -62,6 +69,7 @@ public class Main {
* Recursive synchronized method.
*/
synchronized void recursiveSync(int iter) {
+ assertIsManaged();
if (iter < 40) {
recursiveSync(iter+1);
} else {
@@ -73,6 +81,7 @@ public class Main {
* Tests simple nesting, with and without a throw.
*/
void nestedMayThrow(boolean doThrow) {
+ assertIsManaged();
synchronized (this) {
synchronized (Main.class) {
synchronized (new Object()) {
@@ -90,6 +99,7 @@ public class Main {
* Exercises bug 3215458.
*/
void constantLock() {
+ assertIsManaged();
Class thing = Thread.class;
synchronized (Thread.class) {}
}
@@ -98,6 +108,7 @@ public class Main {
* Confirms that we can have 32 nested monitors on one method.
*/
void notExcessiveNesting() {
+ assertIsManaged();
synchronized (this) { // 1
synchronized (this) { // 2
synchronized (this) { // 3
@@ -138,6 +149,7 @@ public class Main {
* method.
*/
void notNested() {
+ assertIsManaged();
synchronized (this) {} // 1
synchronized (this) {} // 2
synchronized (this) {} // 3
@@ -178,25 +190,6 @@ public class Main {
private void doNothing(Object obj) {}
/**
- * Conditionally uses one of the synchronized objects.
- */
- public void twoPath(Object obj1, Object obj2, int x) {
- Object localObj;
-
- synchronized (obj1) {
- synchronized(obj2) {
- if (x == 0) {
- localObj = obj2;
- } else {
- localObj = obj1;
- }
- }
- }
-
- doNothing(localObj);
- }
-
- /**
* Lock the monitor two or three times, and make use of the locked or
* unlocked object.
*/
@@ -220,17 +213,12 @@ public class Main {
// Smali testing code.
private static void runSmaliTests() {
- if (!hasOatFile() || runtimeIsSoftFail() || isInterpreted()) {
- // Skip test, this seems to be a non-compiled code test configuration.
- return;
- }
-
runTest("OK", new Object[] { new Object(), new Object() }, null);
runTest("TooDeep", new Object[] { new Object() }, null);
runTest("NotStructuredOverUnlock", new Object[] { new Object() },
IllegalMonitorStateException.class);
- runTest("NotStructuredUnderUnlock", new Object[] { new Object() }, null);
- // TODO: new IllegalMonitorStateException());
+ runTest("NotStructuredUnderUnlock", new Object[] { new Object() },
+ IllegalMonitorStateException.class);
runTest("UnbalancedJoin", new Object[] { new Object(), new Object() }, null);
runTest("UnbalancedStraight", new Object[] { new Object(), new Object() }, null);
}
@@ -282,4 +270,5 @@ public class Main {
public static native boolean hasOatFile();
public static native boolean runtimeIsSoftFail();
public static native boolean isInterpreted();
+ public static native void disableStackFrameAsserts();
}
diff --git a/test/088-monitor-verification/src/TwoPath.java b/test/088-monitor-verification/src/TwoPath.java
new file mode 100644
index 0000000000..bdc15ad82e
--- /dev/null
+++ b/test/088-monitor-verification/src/TwoPath.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+/*
+ * Test case for conditionally using one of two synchronized objects.
+ *
+ * This code cannot be verified at the moment, as the join point merges a register with two
+ * different lock states. It is kept out of Main so that the whole class is not forced to run
+ * in the interpreter.
+ */
+public class TwoPath {
+
+ /**
+ * Conditionally uses one of the synchronized objects.
+ */
+ public static void twoPath(Object obj1, Object obj2, int x) {
+ Main.assertIsManaged();
+
+ Object localObj;
+
+ synchronized (obj1) {
+ synchronized(obj2) {
+ if (x == 0) {
+ localObj = obj2;
+ } else {
+ localObj = obj1;
+ }
+ }
+ }
+
+ doNothing(localObj);
+ }
+
+ private static void doNothing(Object o) {
+ }
+}
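The comment in TwoPath.java pins the verifier limitation on the join point after the
if/else: there the same register may hold either obj1 or obj2, two objects with
different outstanding-lock states, and the monitor verifier cannot merge them. A
minimal sketch, assuming the problem is only that conditional aliasing while the
monitors are still held, of a variant the verifier can handle (not part of this
patch):

    public class TwoPathVerifiable {
        // Select the object only after both monitors are released, so no
        // register merges two different lock states at a join point.
        public static void twoPath(Object obj1, Object obj2, int x) {
            synchronized (obj1) {
                synchronized (obj2) {
                    // Work that needs both locks would go here.
                }
            }
            Object localObj = (x == 0) ? obj2 : obj1;
            doNothing(localObj);
        }

        private static void doNothing(Object o) {}
    }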
diff --git a/test/131-structural-change/expected.txt b/test/131-structural-change/expected.txt
index cc7713d252..1d19278f1e 100644
--- a/test/131-structural-change/expected.txt
+++ b/test/131-structural-change/expected.txt
@@ -1,2 +1,3 @@
+JNI_OnLoad called
Should really reach here.
Done.
diff --git a/test/131-structural-change/src/Main.java b/test/131-structural-change/src/Main.java
index 6cbbd12387..c7488992df 100644
--- a/test/131-structural-change/src/Main.java
+++ b/test/131-structural-change/src/Main.java
@@ -35,7 +35,7 @@ public class Main {
e.printStackTrace(System.out);
}
- boolean haveOatFile = hasOat();
+ boolean haveOatFile = hasOatFile();
boolean gotError = false;
try {
Class<?> bClass = getClass().getClassLoader().loadClass("B");
@@ -45,10 +45,10 @@ public class Main {
e.printStackTrace(System.out);
}
if (haveOatFile ^ gotError) {
- System.out.println("Did not get expected error.");
+ System.out.println("Did not get expected error. " + haveOatFile + " " + gotError);
}
System.out.println("Done.");
}
- private native static boolean hasOat();
+ private native static boolean hasOatFile();
}
diff --git a/test/141-class-unload/expected.txt b/test/141-class-unload/expected.txt
index 53d7abecaf..11de660c43 100644
--- a/test/141-class-unload/expected.txt
+++ b/test/141-class-unload/expected.txt
@@ -21,3 +21,4 @@ null
JNI_OnLoad called
class null false test
JNI_OnUnload called
+Number of loaded unload-ex maps 0
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 3cc43accbe..0640b364c9 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
import java.lang.ref.WeakReference;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
@@ -43,11 +46,28 @@ public class Main {
testStackTrace(constructor);
    // Stress test to make sure we don't leak memory.
stressTest(constructor);
+ // Test that the oat files are unloaded.
+ testOatFilesUnloaded(getPid());
} catch (Exception e) {
System.out.println(e);
}
}
+ private static void testOatFilesUnloaded(int pid) throws Exception {
+    BufferedReader reader = new BufferedReader(new FileReader("/proc/" + pid + "/maps"));
+ String line;
+ int count = 0;
+ Runtime.getRuntime().gc();
+ System.runFinalization();
+ while ((line = reader.readLine()) != null) {
+ if (line.contains("@141-class-unload-ex.jar")) {
+ System.out.println(line);
+ ++count;
+ }
+ }
+ System.out.println("Number of loaded unload-ex maps " + count);
+ }
+
private static void stressTest(Constructor constructor) throws Exception {
for (int i = 0; i <= 100; ++i) {
setUpUnloadLoader(constructor, false);
@@ -163,4 +183,8 @@ public class Main {
loadLibrary.invoke(intHolder, nativeLibraryName);
return new WeakReference(loader);
}
+
+ private static int getPid() throws Exception {
+ return Integer.parseInt(new File("/proc/self").getCanonicalFile().getName());
+ }
}
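testOatFilesUnloaded above detects unloading by scanning /proc/<pid>/maps after
forcing a GC and counting lines that still mention the unload-ex jar; zero remaining
mappings means the oat file was unmapped together with its class loader. A
self-contained sketch of the same /proc-based technique (Linux/Android only), here
closing the reader via try-with-resources where the patch leaves it to finalization:

    import java.io.BufferedReader;
    import java.io.FileReader;

    public class MapsCheck {
        // Counts memory mappings of the current process whose /proc/self/maps
        // line contains the given substring (e.g. a jar or oat file name).
        public static int countMappings(String substring) throws Exception {
            int count = 0;
            try (BufferedReader reader =
                    new BufferedReader(new FileReader("/proc/self/maps"))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    if (line.contains(substring)) {
                        ++count;
                    }
                }
            }
            return count;
        }

        public static void main(String[] args) throws Exception {
            System.out.println("Mappings: " + countMappings("@141-class-unload-ex.jar"));
        }
    }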
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 9facfdb076..0ee2ff9fda 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -45,10 +46,14 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVReg(m, 1, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
success = GetVReg(m, 2, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
CHECK(GetVReg(m, 3, kReferenceVReg, &value));
CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
@@ -78,10 +83,14 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
uint32_t value32 = 0;
CHECK(GetVReg(m, 6, kReferenceVReg, &value32));
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index c21168b81e..6fcebdb8b5 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -63,7 +64,9 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 1u);
bool success = GetVReg(m, 2, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
CHECK(GetVReg(m, 3, kReferenceVReg, &value));
CHECK_EQ(value, 1u);
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 7e9a583faf..2a56a7fce7 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -43,7 +44,7 @@ class TestVisitor : public StackVisitor {
found_method_ = true;
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
- m->IsOptimized(sizeof(void*)) &&
+ GetCurrentCode().IsOptimized(sizeof(void*)) &&
!Runtime::Current()->IsDebuggable()) {
CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
} else {
diff --git a/test/537-checker-arraycopy/expected.txt b/test/537-checker-arraycopy/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/537-checker-arraycopy/expected.txt
diff --git a/test/537-checker-arraycopy/info.txt b/test/537-checker-arraycopy/info.txt
new file mode 100644
index 0000000000..ea88f89306
--- /dev/null
+++ b/test/537-checker-arraycopy/info.txt
@@ -0,0 +1 @@
+Test for edge cases of System.arraycopy.
diff --git a/test/537-checker-arraycopy/src/Main.java b/test/537-checker-arraycopy/src/Main.java
new file mode 100644
index 0000000000..30ccc56b80
--- /dev/null
+++ b/test/537-checker-arraycopy/src/Main.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+ public static void main(String[] args) {
+ arraycopy();
+ try {
+ arraycopy(new Object());
+ throw new Error("Should not be here");
+ } catch (ArrayStoreException ase) {
+ // Ignore.
+ }
+ try {
+ arraycopy(null);
+ throw new Error("Should not be here");
+ } catch (NullPointerException npe) {
+ // Ignore.
+ }
+
+ try {
+ arraycopy(new Object[1]);
+ throw new Error("Should not be here");
+ } catch (ArrayIndexOutOfBoundsException aiooe) {
+ // Ignore.
+ }
+
+ arraycopy(new Object[2]);
+ arraycopy(new Object[2], 0);
+
+ try {
+ arraycopy(new Object[1], 1);
+ throw new Error("Should not be here");
+ } catch (ArrayIndexOutOfBoundsException aiooe) {
+ // Ignore.
+ }
+ }
+
+ /// CHECK-START-X86_64: void Main.arraycopy() disassembly (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: test
+ /// CHECK-NOT: call
+ /// CHECK: ReturnVoid
+ // Checks that the call is intrinsified and that there is no test instruction
+ // when we know the source and destination are not null.
+ public static void arraycopy() {
+ Object[] obj = new Object[4];
+ System.arraycopy(obj, 1, obj, 0, 1);
+ }
+
+ public static void arraycopy(Object obj) {
+ System.arraycopy(obj, 1, obj, 0, 1);
+ }
+
+ public static void arraycopy(Object[] obj, int pos) {
+ System.arraycopy(obj, pos, obj, 0, obj.length);
+ }
+}
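The CHECK lines above assert that the call is intrinsified and that no null-check
test instruction survives when both source and destination are known to be non-null.
The remaining calls exercise the bounds behavior, which follows from
System.arraycopy(src, srcPos, dst, dstPos, length) requiring
srcPos + length <= src.length and dstPos + length <= dst.length. A worked sketch of
the two bounds cases (not part of the test):

    public class BoundsSketch {
        public static void main(String[] args) {
            Object[] two = new Object[2];
            // arraycopy(new Object[2], 0): 0 + 2 <= 2 on both sides, so it succeeds.
            System.arraycopy(two, 0, two, 0, two.length);

            Object[] one = new Object[1];
            try {
                // arraycopy(new Object[1], 1): 1 + 1 > 1, so it must throw.
                System.arraycopy(one, 1, one, 0, one.length);
                throw new Error("Should not be here");
            } catch (ArrayIndexOutOfBoundsException expected) {
                // Ignore.
            }
        }
    }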
diff --git a/test/538-checker-embed-constants/expected.txt b/test/538-checker-embed-constants/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/538-checker-embed-constants/expected.txt
diff --git a/test/538-checker-embed-constants/info.txt b/test/538-checker-embed-constants/info.txt
new file mode 100644
index 0000000000..5a722ecf12
--- /dev/null
+++ b/test/538-checker-embed-constants/info.txt
@@ -0,0 +1 @@
+Test embedding of constants in assembler instructions.
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
new file mode 100644
index 0000000000..d8618e30fb
--- /dev/null
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /// CHECK-START-ARM: int Main.and255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: and {{r\d+}}, {{r\d+}}, #255
+
+ public static int and255(int arg) {
+ return arg & 255;
+ }
+
+ /// CHECK-START-ARM: int Main.and511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int and511(int arg) {
+ return arg & 511;
+ }
+
+ /// CHECK-START-ARM: int Main.andNot15(int) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK: bic {{r\d+}}, {{r\d+}}, #15
+
+ public static int andNot15(int arg) {
+ return arg & ~15;
+ }
+
+ /// CHECK-START-ARM: int Main.or255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: orr {{r\d+}}, {{r\d+}}, #255
+
+ public static int or255(int arg) {
+ return arg | 255;
+ }
+
+ /// CHECK-START-ARM: int Main.or511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int or511(int arg) {
+ return arg | 511;
+ }
+
+ /// CHECK-START-ARM: int Main.orNot15(int) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK: orn {{r\d+}}, {{r\d+}}, #15
+
+ public static int orNot15(int arg) {
+ return arg | ~15;
+ }
+
+ /// CHECK-START-ARM: int Main.xor255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: eor {{r\d+}}, {{r\d+}}, #255
+
+ public static int xor255(int arg) {
+ return arg ^ 255;
+ }
+
+ /// CHECK-START-ARM: int Main.xor511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int xor511(int arg) {
+ return arg ^ 511;
+ }
+
+ /// CHECK-START-ARM: int Main.xorNot15(int) disassembly (after)
+ /// CHECK: mvn {{r\d+}}, #15
+ /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int xorNot15(int arg) {
+ return arg ^ ~15;
+ }
+
+ /// CHECK-START-ARM: long Main.and255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-DAG: movs {{r\d+}}, #0
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and255(long arg) {
+ return arg & 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.and511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: movs {{r\d+}}, #0
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and511(long arg) {
+ return arg & 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.andNot15(long) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK: bic {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long andNot15(long arg) {
+ return arg & ~15L;
+ }
+
+ /// CHECK-START-ARM: long Main.and0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: bic {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and0xfffffff00000000f(long arg) {
+ return arg & 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.or255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK: orr {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or255(long arg) {
+ return arg | 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.or511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or511(long arg) {
+ return arg | 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.orNot15(long) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: mvn {{r\d+}}, #0
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long orNot15(long arg) {
+ return arg | ~15L;
+ }
+
+ /// CHECK-START-ARM: long Main.or0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK-DAG: orr {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or0xfffffff00000000f(long arg) {
+ return arg | 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.xor255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: eor
+ /// CHECK: eor {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-NOT: eor
+
+ public static long xor255(long arg) {
+ return arg ^ 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.xor511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xor511(long arg) {
+ return arg ^ 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.xorNot15(long) disassembly (after)
+ /// CHECK-DAG: mvn {{r\d+}}, #15
+ /// CHECK-DAG: mov.w {{r\d+}}, #-1
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xorNot15(long arg) {
+ return arg ^ ~15L;
+ }
+
+ // Note: No support for partial long constant embedding.
+ /// CHECK-START-ARM: long Main.xor0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-DAG: movs {{r\d+}}, #15
+ /// CHECK-DAG: mvn {{r\d+}}, #15
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xor0xfffffff00000000f(long arg) {
+ return arg ^ 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.xor0xf00000000000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mov.w {{r\d+}}, #-268435456
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #-268435456
+ /// CHECK-NOT: eor
+
+ public static long xor0xf00000000000000f(long arg) {
+ return arg ^ 0xf00000000000000fL;
+ }
+
+ public static void main(String[] args) {
+ int arg = 0x87654321;
+ assertIntEquals(and255(arg), 0x21);
+ assertIntEquals(and511(arg), 0x121);
+ assertIntEquals(andNot15(arg), 0x87654320);
+ assertIntEquals(or255(arg), 0x876543ff);
+ assertIntEquals(or511(arg), 0x876543ff);
+ assertIntEquals(orNot15(arg), 0xfffffff1);
+ assertIntEquals(xor255(arg), 0x876543de);
+ assertIntEquals(xor511(arg), 0x876542de);
+ assertIntEquals(xorNot15(arg), 0x789abcd1);
+
+ long longArg = 0x1234567887654321L;
+ assertLongEquals(and255(longArg), 0x21L);
+ assertLongEquals(and511(longArg), 0x121L);
+ assertLongEquals(andNot15(longArg), 0x1234567887654320L);
+ assertLongEquals(and0xfffffff00000000f(longArg), 0x1234567000000001L);
+ assertLongEquals(or255(longArg), 0x12345678876543ffL);
+ assertLongEquals(or511(longArg), 0x12345678876543ffL);
+ assertLongEquals(orNot15(longArg), 0xfffffffffffffff1L);
+ assertLongEquals(or0xfffffff00000000f(longArg), 0xfffffff88765432fL);
+ assertLongEquals(xor255(longArg), 0x12345678876543deL);
+ assertLongEquals(xor511(longArg), 0x12345678876542deL);
+ assertLongEquals(xorNot15(longArg), 0xedcba987789abcd1L);
+ assertLongEquals(xor0xfffffff00000000f(longArg), 0xedcba9888765432eL);
+ assertLongEquals(xor0xf00000000000000f(longArg), 0xe23456788765432eL);
+ }
+}
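The expected values in main follow from plain bitwise arithmetic on the test inputs,
e.g. 0x87654321 & 0xff keeps only the low byte (0x21) and & 0x1ff the low nine bits
(0x121). A quick sanity check of a few of the constants (not part of the test):

    public class ConstantsSanity {
        public static void main(String[] args) {
            System.out.println(Integer.toHexString(0x87654321 & 255));   // 21
            System.out.println(Integer.toHexString(0x87654321 & 511));   // 121
            System.out.println(Integer.toHexString(0x87654321 | ~15));   // fffffff1
            // High word masked by 0xfffffff0, low word by 0x0000000f:
            System.out.println(Long.toHexString(
                0x1234567887654321L & 0xfffffff00000000fL));             // 1234567000000001
            // XOR with ~15L flips every bit except the low four:
            System.out.println(Long.toHexString(
                0x1234567887654321L ^ ~15L));                            // edcba987789abcd1
        }
    }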
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index 2aeca8c8fc..b7546801b9 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
# limitations under the License.
# Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental-lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental-lambdas
+${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
new file mode 100755
index 0000000000..c7866878e9
--- /dev/null
+++ b/test/960-default-smali/build
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# Generate the smali Main.smali file or fail
+./util-src/generate_smali.py ./smali
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/expected.txt b/test/960-default-smali/expected.txt
new file mode 100644
index 0000000000..7671eed5de
--- /dev/null
+++ b/test/960-default-smali/expected.txt
@@ -0,0 +1,84 @@
+Testing for type A
+A-virtual A.SayHi()='Hi '
+A-interface Greeter.SayHi()='Hi '
+A-virtual A.SayHiTwice()='Hi Hi '
+A-interface Greeter.SayHiTwice()='Hi Hi '
+End testing for type A
+Testing for type B
+B-virtual B.SayHi()='Hello '
+B-interface Greeter.SayHi()='Hello '
+B-interface Greeter2.SayHi()='Hello '
+B-virtual B.SayHiTwice()='I say Hello Hello '
+B-interface Greeter.SayHiTwice()='I say Hello Hello '
+B-interface Greeter2.SayHiTwice()='I say Hello Hello '
+End testing for type B
+Testing for type C
+C-virtual A.SayHi()='Hi '
+C-virtual C.SayHi()='Hi '
+C-interface Greeter.SayHi()='Hi '
+C-virtual A.SayHiTwice()='You don't control me'
+C-virtual C.SayHiTwice()='You don't control me'
+C-interface Greeter.SayHiTwice()='You don't control me'
+End testing for type C
+Testing for type D
+D-virtual D.GetName()='Alex '
+D-interface Greeter3.GetName()='Alex '
+D-virtual D.SayHi()='Hello Alex '
+D-interface Greeter.SayHi()='Hello Alex '
+D-interface Greeter3.SayHi()='Hello Alex '
+D-virtual D.SayHiTwice()='Hello Alex Hello Alex '
+D-interface Greeter.SayHiTwice()='Hello Alex Hello Alex '
+D-interface Greeter3.SayHiTwice()='Hello Alex Hello Alex '
+End testing for type D
+Testing for type E
+E-virtual A.SayHi()='Hi2 '
+E-virtual E.SayHi()='Hi2 '
+E-interface Greeter.SayHi()='Hi2 '
+E-interface Greeter2.SayHi()='Hi2 '
+E-virtual A.SayHiTwice()='I say Hi2 Hi2 '
+E-virtual E.SayHiTwice()='I say Hi2 Hi2 '
+E-interface Greeter.SayHiTwice()='I say Hi2 Hi2 '
+E-interface Greeter2.SayHiTwice()='I say Hi2 Hi2 '
+End testing for type E
+Testing for type F
+F-interface Attendant.GetPlace()='android'
+F-virtual F.GetPlace()='android'
+F-virtual A.SayHi()='Hi '
+F-interface Attendant.SayHi()='Hi '
+F-virtual F.SayHi()='Hi '
+F-interface Greeter.SayHi()='Hi '
+F-virtual A.SayHiTwice()='We can override both interfaces'
+F-interface Attendant.SayHiTwice()='We can override both interfaces'
+F-virtual F.SayHiTwice()='We can override both interfaces'
+F-interface Greeter.SayHiTwice()='We can override both interfaces'
+End testing for type F
+Testing for type G
+G-interface Attendant.GetPlace()='android'
+G-virtual G.GetPlace()='android'
+G-interface Attendant.SayHi()='welcome to android'
+G-virtual G.SayHi()='welcome to android'
+G-interface Attendant.SayHiTwice()='welcome to androidwelcome to android'
+G-virtual G.SayHiTwice()='welcome to androidwelcome to android'
+End testing for type G
+Testing for type H
+H-interface Extension.SayHi()='welcome '
+H-virtual H.SayHi()='welcome '
+End testing for type H
+Testing for type I
+I-virtual A.SayHi()='Hi '
+I-interface Greeter.SayHi()='Hi '
+I-interface Greeter2.SayHi()='Hi '
+I-virtual I.SayHi()='Hi '
+I-virtual A.SayHiTwice()='I say Hi Hi '
+I-interface Greeter.SayHiTwice()='I say Hi Hi '
+I-interface Greeter2.SayHiTwice()='I say Hi Hi '
+I-virtual I.SayHiTwice()='I say Hi Hi '
+End testing for type I
+Testing for type J
+J-virtual A.SayHi()='Hi '
+J-interface Greeter.SayHi()='Hi '
+J-virtual J.SayHi()='Hi '
+J-virtual A.SayHiTwice()='Hi Hi '
+J-interface Greeter.SayHiTwice()='Hi Hi '
+J-virtual J.SayHiTwice()='Hi Hi '
+End testing for type J
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
new file mode 100644
index 0000000000..eb596e2c9f
--- /dev/null
+++ b/test/960-default-smali/info.txt
@@ -0,0 +1,19 @@
+Smali-based tests for experimental interface default methods.
+
+This obviously needs to run under ART or a Java 8 language runtime and compiler.
+
+When run, a Main.smali file will be generated by the util-src/generate_smali.py
+script. If we run with --jvm, we will use the tools/extract-embedded-java script
+to turn the smali into equivalent Java using the embedded Java code.
+
+When updating, be sure to write the equivalent Java code in comments in the
+smali files.
+
+Care should be taken when updating the generate_smali.py script. It must always
+return equivalent output when run multiple times.
+
+To update the test files do the following steps:
+ <Add new classes/interfaces>
+ <Add these classes/interfaces to ./smali/classes.xml>
+  JAVA_HOME="/path/to/java-8-jdk" ../run-test --use-java-home --update --jvm --host 960-default-smali
+ git add ./smali/classes.xml ./expected.txt
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/960-default-smali/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/960-default-smali/smali/A.smali b/test/960-default-smali/smali/A.smali
new file mode 100644
index 0000000000..e755612fbe
--- /dev/null
+++ b/test/960-default-smali/smali/A.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LA;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# class A implements Greeter {
+# public String SayHi() {
+# return "Hi ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hi "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Attendant.smali b/test/960-default-smali/smali/Attendant.smali
new file mode 100644
index 0000000000..ab63aeefcb
--- /dev/null
+++ b/test/960-default-smali/smali/Attendant.smali
@@ -0,0 +1,53 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LAttendant;
+.super Ljava/lang/Object;
+
+# public interface Attendant {
+# public default String SayHi() {
+# return "welcome to " + GetPlace();
+# }
+# public default String SayHiTwice() {
+# return SayHi() + SayHi();
+# }
+#
+# public String GetPlace();
+# }
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ const-string v0, "welcome to "
+ invoke-interface {p0}, LAttendant;->GetPlace()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 2
+ invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public abstract GetPlace()Ljava/lang/String;
+.end method
diff --git a/test/960-default-smali/smali/B.smali b/test/960-default-smali/smali/B.smali
new file mode 100644
index 0000000000..d847dd12ff
--- /dev/null
+++ b/test/960-default-smali/smali/B.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LB;
+.super Ljava/lang/Object;
+.implements LGreeter2;
+
+# class B implements Greeter2 {
+# public String SayHi() {
+# return "Hello ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hello "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/C.smali b/test/960-default-smali/smali/C.smali
new file mode 100644
index 0000000000..08a8508be1
--- /dev/null
+++ b/test/960-default-smali/smali/C.smali
@@ -0,0 +1,37 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LC;
+.super LA;
+
+# class C extends A {
+# public String SayHiTwice() {
+# return "You don't control me";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "You don't control me"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/D.smali b/test/960-default-smali/smali/D.smali
new file mode 100644
index 0000000000..32f3b7ec8b
--- /dev/null
+++ b/test/960-default-smali/smali/D.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LD;
+.super Ljava/lang/Object;
+.implements LGreeter3;
+
+# class D implements Greeter3 {
+# public String GetName() {
+# return "Alex ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public GetName()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Alex "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/E.smali b/test/960-default-smali/smali/E.smali
new file mode 100644
index 0000000000..bae6250414
--- /dev/null
+++ b/test/960-default-smali/smali/E.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LE;
+.super LA;
+.implements LGreeter2;
+
+# class E extends A implements Greeter2 {
+# public String SayHi() {
+# return "Hi2 ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hi2 "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Extension.smali b/test/960-default-smali/smali/Extension.smali
new file mode 100644
index 0000000000..60ffa26ec6
--- /dev/null
+++ b/test/960-default-smali/smali/Extension.smali
@@ -0,0 +1,30 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LExtension;
+.super Ljava/lang/Object;
+
+# public interface Extension {
+# public default String SayHi() {
+# return "welcome ";
+# }
+# }
+
+.method public SayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "welcome "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/F.smali b/test/960-default-smali/smali/F.smali
new file mode 100644
index 0000000000..3eaa089e1f
--- /dev/null
+++ b/test/960-default-smali/smali/F.smali
@@ -0,0 +1,47 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LF;
+.super LA;
+.implements LAttendant;
+
+# class F extends A implements Attendant {
+# public String GetPlace() {
+# return "android";
+# }
+# public String SayHiTwice() {
+# return "We can override both interfaces";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "We can override both interfaces"
+ return-object v0
+.end method
+
+.method public GetPlace()Ljava/lang/String;
+ .registers 1
+ const-string v0, "android"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/G.smali b/test/960-default-smali/smali/G.smali
new file mode 100644
index 0000000000..446f2a4c64
--- /dev/null
+++ b/test/960-default-smali/smali/G.smali
@@ -0,0 +1,37 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LG;
+.super Ljava/lang/Object;
+.implements LAttendant;
+
+# class G implements Attendant {
+# public String GetPlace() {
+# return "android";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public GetPlace()Ljava/lang/String;
+ .registers 1
+ const-string v0, "android"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter.smali b/test/960-default-smali/smali/Greeter.smali
new file mode 100644
index 0000000000..28530ffc6f
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter;
+.super Ljava/lang/Object;
+
+# public interface Greeter {
+# public String SayHi();
+#
+# public default String SayHiTwice() {
+# return SayHi() + SayHi();
+# }
+# }
+
+.method public abstract SayHi()Ljava/lang/String;
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 2
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter2.smali b/test/960-default-smali/smali/Greeter2.smali
new file mode 100644
index 0000000000..ace1798bab
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter2.smali
@@ -0,0 +1,39 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter2;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# public interface Greeter2 extends Greeter {
+# public default String SayHiTwice() {
+# return "I say " + SayHi() + SayHi();
+# }
+# }
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 3
+ const-string v0, "I say "
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter3.smali b/test/960-default-smali/smali/Greeter3.smali
new file mode 100644
index 0000000000..31fc2e79ff
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter3.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter3;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# public interface Greeter3 extends Greeter {
+# public String GetName();
+#
+# public default String SayHi() {
+# return "Hello " + GetName();
+# }
+# }
+
+.method public abstract GetName()Ljava/lang/String;
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ const-string v0, "Hello "
+ invoke-interface {p0}, LGreeter3;->GetName()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/H.smali b/test/960-default-smali/smali/H.smali
new file mode 100644
index 0000000000..82065ea49d
--- /dev/null
+++ b/test/960-default-smali/smali/H.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LH;
+.super Ljava/lang/Object;
+.implements LExtension;
+
+# class H implements Extension {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/960-default-smali/smali/I.smali b/test/960-default-smali/smali/I.smali
new file mode 100644
index 0000000000..72fb58afe4
--- /dev/null
+++ b/test/960-default-smali/smali/I.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LI;
+.super LA;
+.implements LGreeter2;
+
+# class I extends A implements Greeter2 {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/960-default-smali/smali/J.smali b/test/960-default-smali/smali/J.smali
new file mode 100644
index 0000000000..93f3d6231c
--- /dev/null
+++ b/test/960-default-smali/smali/J.smali
@@ -0,0 +1,29 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LJ;
+.super LA;
+
+# class J extends A {
+# }
+
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
diff --git a/test/960-default-smali/smali/classes.xml b/test/960-default-smali/smali/classes.xml
new file mode 100644
index 0000000000..0aa41f7fb6
--- /dev/null
+++ b/test/960-default-smali/smali/classes.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<data>
+ <classes>
+ <class name="A" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="B" super="java/lang/Object">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="C" super="A">
+ <implements> </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="D" super="java/lang/Object">
+ <implements>
+ <item>Greeter3</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="E" super="A">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="F" super="A">
+ <implements>
+ <item>Attendant</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="G" super="java/lang/Object">
+ <implements>
+ <item>Attendant</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="H" super="java/lang/Object">
+ <implements>
+ <item>Extension</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="I" super="A">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="J" super="A">
+ <implements> </implements>
+ <methods> </methods>
+ </class>
+ </classes>
+
+ <interfaces>
+ <interface name="Extension" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="default">SayHi</method>
+ </methods>
+ </interface>
+
+ <interface name="Greeter" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="abstract">SayHi</method>
+ <method type="default">SayHiTwice</method>
+ </methods>
+ </interface>
+
+ <interface name="Greeter2" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods> </methods>
+ </interface>
+
+ <interface name="Greeter3" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods>
+ <method type="abstract">GetName</method>
+ </methods>
+ </interface>
+
+ <interface name="Attendant" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="default">SayHi</method>
+ <method type="default">SayHiTwice</method>
+ <method type="abstract">GetPlace</method>
+ </methods>
+ </interface>
+ </interfaces>
+</data>
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/960-default-smali/util-src/generate_smali.py
new file mode 100755
index 0000000000..b2bf1f0761
--- /dev/null
+++ b/test/960-default-smali/util-src/generate_smali.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali Main file for test 960
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright
+import testgen.mixins as mixins
+
+from collections import namedtuple
+import itertools
+import functools
+import xml.etree.ElementTree as ET
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+  A main class and main method for this test.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{test_funcs}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass
+ """
+ self.tests = set()
+ self.global_funcs = set()
+
+ def add_instance(self, it):
+ """
+ Add an instance test for the given class
+ """
+ self.tests.add(it)
+
+ def add_func(self, f):
+ """
+ Add a function to the class
+ """
+ self.global_funcs.add(f)
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+    Return the smali code for this class
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in sorted(all_tests):
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ funcs = ""
+ for f in self.global_funcs:
+ funcs += str(f)
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ test_groups=test_groups,
+ main_func=main_func, test_funcs=funcs)
+
+
+class InstanceTest(mixins.Named, mixins.NameComparableMixin):
+ """
+  A method that runs tests for a particular concrete type. It calls the test
+  cases for running it in all possible ways.
+ """
+
+ INSTANCE_TEST_TEMPLATE = """
+# public static void {test_name}() {{
+# System.out.println("Testing for type {ty}");
+# String s = "{ty}";
+# {ty} v = new {ty}();
+.method public static {test_name}()V
+ .locals 3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Testing for type {ty}"
+ invoke-virtual {{v2,v0}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "{ty}"
+ new-instance v1, L{ty};
+ invoke-direct {{v1}}, L{ty};-><init>()V
+
+ {invokes}
+
+ const-string v0, "End testing for type {ty}"
+ invoke-virtual {{v2,v0}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+# System.out.println("End testing for type {ty}");
+# }}
+"""
+
+ TEST_INVOKE_TEMPLATE = """
+# {fname}(s, v);
+ invoke-static {{v0, v1}}, {fname}(Ljava/lang/String;L{farg};)V
+"""
+
+ def __init__(self, main, ty):
+ """
+ Initialize this test group for the given type
+ """
+ self.ty = ty
+ self.main = main
+ self.funcs = set()
+ self.main.add_instance(self)
+
+ def get_name(self):
+ """
+ Get the name of this test group
+ """
+ return "TEST_NAME_"+self.ty
+
+ def add_func(self, f):
+ """
+ Add a test function to this test group
+ """
+ self.main.add_func(f)
+ self.funcs.add(f)
+
+ def __str__(self):
+ """
+    Returns the smali code for this test group
+ """
+ func_invokes = ""
+ for f in sorted(self.funcs, key=lambda a: (a.func, a.farg)):
+ func_invokes += self.TEST_INVOKE_TEMPLATE.format(fname=f.get_name(),
+ farg=f.farg)
+
+ return self.INSTANCE_TEST_TEMPLATE.format(test_name=self.get_name(), ty=self.ty,
+ invokes=func_invokes)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A single test case that attempts to invoke a function on receiver of a given type.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}(String s, {farg} v) {{
+# try {{
+# System.out.printf("%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n", s, v.{callfunc}());
+# return;
+# }} catch (Error e) {{
+# System.out.printf("%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n", s);
+# e.printStackTrace(System.out);
+# }}
+# }}
+.method public static {fname}(Ljava/lang/String;L{farg};)V
+ .locals 7
+ :call_{fname}_try_start
+ const/4 v0, 2
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ aput-object p0,v1,v0
+
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n"
+
+ invoke-{invoke_type} {{p1}}, L{farg};->{callfunc}()Ljava/lang/String;
+ move-result-object v4
+ const/4 v0, 1
+ aput-object v4, v1, v0
+
+ invoke-virtual {{v2,v3,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ const/4 v0, 1
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ aput-object p0, v1, v0
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v4, "%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n"
+ invoke-virtual {{v2,v4,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ def __init__(self, func, farg, invoke):
+ """
+ Initialize this test function for the given invoke type and argument
+ """
+ self.func = func
+ self.farg = farg
+ self.invoke = invoke
+
+ def get_name(self):
+ """
+ Get the name of this test
+ """
+ return "Test_Func_{}_{}_{}".format(self.func, self.farg, self.invoke)
+
+ def __str__(self):
+ """
+ Get the smali code for this test function
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(),
+ farg=self.farg,
+ invoke_type=self.invoke,
+ callfunc=self.func)
+
+def flatten_classes(classes, c):
+ """
+ Iterate over all the classes 'c' can be used as
+ """
+ while c:
+ yield c
+ c = classes.get(c.super_class)
+
+def flatten_class_methods(classes, c):
+ """
+ Iterate over all the methods 'c' can call
+ """
+ for c1 in flatten_classes(classes, c):
+ yield from c1.methods
+
+def flatten_interfaces(dat, c):
+ """
+ Iterate over all the interfaces 'c' transitively implements
+ """
+ def get_ifaces(cl):
+ for i2 in cl.implements:
+ yield dat.interfaces[i2]
+ yield from get_ifaces(dat.interfaces[i2])
+
+ for cl in flatten_classes(dat.classes, c):
+ yield from get_ifaces(cl)
+
+def flatten_interface_methods(dat, i):
+ """
+ Iterate over all the interface methods 'c' can call
+ """
+ yield from i.methods
+ for i2 in flatten_interfaces(dat, i):
+ yield from i2.methods
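+
+# A minimal illustration of the flatten_* helpers above (hypothetical data,
+# not part of any generated test; Clazz is the namedtuple defined further
+# below):
+#
+#   classes = {"A": Clazz("A", ["foo"], None, []),
+#              "B": Clazz("B", ["bar"], "A", [])}
+#   list(flatten_classes(classes, classes["B"]))        # yields B, then A
+#   list(flatten_class_methods(classes, classes["B"]))  # yields "bar", "foo"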
+
+def make_main_class(dat):
+ """
+ Creates a Main.smali file that runs all the tests
+ """
+ m = MainClass()
+ for c in dat.classes.values():
+ i = InstanceTest(m, c.name)
+ for clazz in flatten_classes(dat.classes, c):
+ for meth in flatten_class_methods(dat.classes, clazz):
+ i.add_func(Func(meth, clazz.name, 'virtual'))
+ for iface in flatten_interfaces(dat, clazz):
+ for meth in flatten_interface_methods(dat, iface):
+ i.add_func(Func(meth, clazz.name, 'virtual'))
+ i.add_func(Func(meth, iface.name, 'interface'))
+ return m
+
+class TestData(namedtuple("TestData", ['classes', 'interfaces'])):
+ """
+ A class representing the classes.xml document.
+ """
+ pass
+
+class Clazz(namedtuple("Clazz", ["name", "methods", "super_class", "implements"])):
+ """
+ A class representing a class element in the classes.xml document.
+ """
+ pass
+
+class IFace(namedtuple("IFace", ["name", "methods", "super_class", "implements"])):
+ """
+ A class representing an interface element in the classes.xml document.
+ """
+ pass
+
+def parse_xml(xml):
+ """
+ Parse the xml description of this test.
+ """
+ classes = dict()
+ ifaces = dict()
+ root = ET.fromstring(xml)
+ for iface in root.find("interfaces"):
+ name = iface.attrib['name']
+ implements = [a.text for a in iface.find("implements")]
+ methods = [a.text for a in iface.find("methods")]
+ ifaces[name] = IFace(name = name,
+ super_class = iface.attrib['super'],
+ methods = methods,
+ implements = implements)
+ for clazz in root.find('classes'):
+ name = clazz.attrib['name']
+ implements = [a.text for a in clazz.find("implements")]
+ methods = [a.text for a in clazz.find("methods")]
+ classes[name] = Clazz(name = name,
+ super_class = clazz.attrib['super'],
+ methods = methods,
+ implements = implements)
+ return TestData(classes, ifaces)
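+
+# For reference, a classes.xml accepted by this parser looks roughly like the
+# hypothetical sketch below; only the 'interfaces'/'classes' containers, the
+# 'name' and 'super' attributes, and the 'implements'/'methods' child lists
+# are significant (the inner tag names are arbitrary):
+#
+# <data>
+#   <interfaces>
+#     <iface name="IFACE0" super="java/lang/Object">
+#       <implements/>
+#       <methods><method>sayHi</method></methods>
+#     </iface>
+#   </interfaces>
+#   <classes>
+#     <class name="A" super="java/lang/Object">
+#       <implements><iface>IFACE0</iface></implements>
+#       <methods/>
+#     </class>
+#   </classes>
+# </data>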
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ class_data = parse_xml((smali_dir / "classes.xml").open().read())
+ make_main_class(class_data).dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
new file mode 100755
index 0000000000..707c17e1cf
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/build
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+mkdir -p ./smali
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# Generate the smali files and expected.txt or fail
+./util-src/generate_smali.py ./smali ./expected.txt
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/961-default-iface-resolution-generated/expected.txt b/test/961-default-iface-resolution-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/961-default-iface-resolution-generated/info.txt b/test/961-default-iface-resolution-generated/info.txt
new file mode 100644
index 0000000000..2cd2cc75b7
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for experimental interface default methods.
+
+This tests that interface method resolution order is correct.
+
+Obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, the smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm, we use the
+$(ANDROID_BUILD_TOP)/art/tools/extract-embedded-java script to turn the smali
+into equivalent Java using the embedded Java code.
+
+Take care when updating the generate_smali.py script: it must produce
+equivalent output every time it is run, and the expected output it
+generates must remain valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
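+# Outside of --jvm runs, default methods are experimental: enable them in the
+# runtime and also forward the same flag through dex2oat, where each
+# -Xcompiler-option passes one argument to the compiler and --runtime-arg
+# hands the following argument to the compiler's own runtime.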
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/961-default-iface-resolution-generated/util-src/generate_smali.py b/test/961-default-iface-resolution-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..921a096dd3
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/util-src/generate_smali.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 961.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the type tree can have. Includes the class object in the tree.
+# Increasing this increases the number of generated files significantly. This
+# value was chosen as it is fairly quick to run and very comprehensive, checking
+# every possible interface tree up to 5 layers deep.
+MAX_IFACE_DEPTH = 5
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the MainClass smali code.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("smali"),
+ test_groups = test_groups,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# try {{
+# {farg} v = new {farg}();
+# System.out.printf("%s calls default method on %s\\n",
+# v.CalledClassName(),
+# v.CalledInterfaceName());
+# return;
+# }} catch (Error e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ :call_{fname}_try_start
+ new-instance v6, L{farg};
+ invoke-direct {{v6}}, L{farg};-><init>()V
+
+ const/4 v0, 2
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ invoke-virtual {{v6}}, L{farg};->CalledClassName()Ljava/lang/String;
+ move-result-object v4
+ aput-object v4,v1,v0
+
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "%s calls default method on %s\\n"
+
+ invoke-virtual {{v6}}, L{farg};->CalledInterfaceName()Ljava/lang/String;
+ move-result-object v4
+ const/4 v0, 1
+ aput-object v4, v1, v0
+
+ invoke-virtual {{v2,v3,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def get_expected(self):
+ """
+ Get the expected output calling this function.
+ """
+ return "{tree} calls default method on {iface_tree}".format(
+ tree = self.farg.get_tree(), iface_tree = self.farg.get_called().get_tree())
+
+ def get_name(self):
+ """
+ Get the name of this function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def __str__(self):
+ """
+ Print the smali code of this function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(), farg=self.farg.get_name())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test default method resolution order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+.implements L{iface_name};
+
+# public class {class_name} implements {iface_name} {{
+# public String CalledClassName() {{
+# return "{tree}";
+# }}
+# }}
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public CalledClassName()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{tree}"
+ return-object v0
+.end method
+"""
+
+ def __init__(self, iface):
+ """
+ Initialize this test class which implements the given interface
+ """
+ self.iface = iface
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iface_tree}]".format(class_name = self.class_name,
+ iface_tree = self.iface.get_tree())
+
+ def __iter__(self):
+ """
+ Step through all interfaces implemented transitively by this class
+ """
+ yield self.iface
+ yield from self.iface
+
+ def get_called(self):
+ """
+ Get the interface whose default method would be called when calling the
+ CalledInterfaceName function.
+ """
+ all_ifaces = set(iface for iface in self if iface.default)
+ for i in all_ifaces:
+ if all(map(lambda j: i not in j.get_super_types(), all_ifaces)):
+ return i
+ raise Exception("UNREACHABLE! Unable to find default method!")
+
+ def __str__(self):
+ """
+ Print the smali code of this class.
+ """
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ iface_name = self.iface.get_name(),
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+# public String CalledClassName();
+.method public abstract CalledClassName()Ljava/lang/String;
+.end method
+
+{funcs}
+
+# }}
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default String CalledInterfaceName() {{
+# return "{tree}";
+# }}
+.method public CalledInterfaceName()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{tree}"
+ return-object v0
+.end method
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ def __init__(self, ifaces, default):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = sorted(ifaces)
+ self.default = default
+ end = "_DEFAULT" if default else ""
+ self.class_name = "INTERFACE_"+gensym()+end
+
+ def get_super_types(self):
+ """
+ Returns a set of all the supertypes of this interface
+ """
+ return set(i2 for i2 in self)
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def __str__(self):
+ """
+ Print the smali code of this interface.
+ """
+ s_ifaces = " "
+ j_ifaces = " "
+ for i in self.ifaces:
+ s_ifaces += self.IMPLEMENTS_TEMPLATE.format(iface_name = i.get_name())
+ j_ifaces += " {},".format(i.get_name())
+ j_ifaces = j_ifaces[0:-1]
+ if self.default:
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(ifaces = j_ifaces,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+ else:
+ funcs = ""
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def print_tree(ifaces):
+ """
+ Prints a list of iface trees
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+# The deduplicated output of subtree_sizes for each size up to
+# MAX_IFACE_DEPTH.
+SUBTREES = [set(tuple(sorted(l)) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_interface_trees():
+ """
+ Return all legal interface trees
+ """
+ def dump_supers(s):
+ """
+ Does depth first traversal of all the interfaces in the list.
+ """
+ for i in s:
+ yield i
+ yield from i
+
+ def create_interface_trees_inner(num, allow_default):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ if sub == 1:
+ ifaces.append([TestInterface([], allow_default)])
+ if allow_default:
+ ifaces[-1].append(TestInterface([], False))
+ else:
+ ifaces.append(list(create_interface_trees_inner(sub, allow_default)))
+ for supers in itertools.product(*ifaces):
+ all_supers = sorted(set(dump_supers(supers)) - set(supers))
+ for i in range(len(all_supers) + 1):
+ for combo in itertools.combinations(all_supers, i):
+ yield TestInterface(list(combo) + list(supers), allow_default)
+ if allow_default:
+ for i in range(len(split)):
+ ifaces = []
+ for sub, cs in zip(split, itertools.count()):
+ if sub == 1:
+ ifaces.append([TestInterface([], i == cs)])
+ else:
+ ifaces.append(list(create_interface_trees_inner(sub, i == cs)))
+ for supers in itertools.product(*ifaces):
+ all_supers = sorted(set(dump_supers(supers)) - set(supers))
+ for i in range(len(all_supers) + 1):
+ for combo in itertools.combinations(all_supers, i):
+ yield TestInterface(list(combo) + list(supers), False)
+
+ for num in range(1, MAX_IFACE_DEPTH):
+ yield from create_interface_trees_inner(num, True)
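+
+# (How the space is covered: each SUBTREES split fixes the subtree sizes under
+# the root, itertools.product enumerates every choice of subtree per slot, and
+# the combinations over all_supers re-attach indirect super-interfaces as
+# direct ones so diamond hierarchies are generated too. The second branch
+# varies which single subtree is permitted to declare default methods.)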
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for tree in create_interface_trees():
+ classes.add(tree)
+ for i in tree:
+ classes.add(i)
+ test_class = TestClass(tree)
+ mc.add_test(test_class)
+ classes.add(test_class)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
new file mode 100755
index 0000000000..5ad82f70d1
--- /dev/null
+++ b/test/962-iface-static/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/expected.txt b/test/962-iface-static/expected.txt
new file mode 100644
index 0000000000..6d98ea1571
--- /dev/null
+++ b/test/962-iface-static/expected.txt
@@ -0,0 +1,3 @@
+init
+constructor
+Hello
diff --git a/test/962-iface-static/info.txt b/test/962-iface-static/info.txt
new file mode 100644
index 0000000000..d4732e533d
--- /dev/null
+++ b/test/962-iface-static/info.txt
@@ -0,0 +1,4 @@
+Smali-based tests for experimental interface static methods.
+
+To run with --jvm you must set JAVA_HOME to point to a Java 8 installation
+and pass the --use-java-home flag to run-test.
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
new file mode 100755
index 0000000000..e713708c18
--- /dev/null
+++ b/test/962-iface-static/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/962-iface-static/smali/Displayer.smali b/test/962-iface-static/smali/Displayer.smali
new file mode 100644
index 0000000000..06bec16432
--- /dev/null
+++ b/test/962-iface-static/smali/Displayer.smali
@@ -0,0 +1,45 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Displayer {
+# static {
+# System.out.println("init");
+# }
+#
+# public Displayer() {
+# System.out.println("constructor");
+# }
+# }
+
+.class public LDisplayer;
+.super Ljava/lang/Object;
+
+.method public static constructor <clinit>()V
+ .locals 3
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "init"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+
+.method public constructor <init>()V
+ .locals 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "constructor"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
diff --git a/test/962-iface-static/smali/Main.smali b/test/962-iface-static/smali/Main.smali
new file mode 100644
index 0000000000..72fa5e0e6e
--- /dev/null
+++ b/test/962-iface-static/smali/Main.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main {
+# public static void main(String[] args) {
+# System.out.println(iface.SayHi());
+# }
+# }
+.class public LMain;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
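+# Invoking the static interface method below triggers iface's <clinit> first,
+# which constructs a Displayer ("init" then "constructor" are printed) before
+# SayHi() returns "Hello"; this matches the three lines in expected.txt.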
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ invoke-static {}, Liface;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/962-iface-static/smali/iface.smali b/test/962-iface-static/smali/iface.smali
new file mode 100644
index 0000000000..441aae669e
--- /dev/null
+++ b/test/962-iface-static/smali/iface.smali
@@ -0,0 +1,43 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface {
+# public static final Displayer f = new Displayer();
+#
+# public static String SayHi() {
+# return "Hello";
+# }
+# }
+
+.class public abstract interface Liface;
+.super Ljava/lang/Object;
+
+.field public final static f:LDisplayer;
+
+.method public static constructor <clinit>()V
+ .locals 3
+ new-instance v1, LDisplayer;
+ invoke-direct {v1}, LDisplayer;-><init>()V
+ sput-object v1, Liface;->f:LDisplayer;
+ return-void
+.end method
+
+.method public static SayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hello"
+ return-object v0
+.end method
+
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
new file mode 100755
index 0000000000..5ad82f70d1
--- /dev/null
+++ b/test/963-default-range-smali/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/expected.txt b/test/963-default-range-smali/expected.txt
new file mode 100644
index 0000000000..af17d2f873
--- /dev/null
+++ b/test/963-default-range-smali/expected.txt
@@ -0,0 +1,2 @@
+Hello
+Hello
diff --git a/test/963-default-range-smali/info.txt b/test/963-default-range-smali/info.txt
new file mode 100644
index 0000000000..d4732e533d
--- /dev/null
+++ b/test/963-default-range-smali/info.txt
@@ -0,0 +1,4 @@
+Smali-based tests for experimental interface default methods.
+
+To run with --jvm you must set JAVA_HOME to point to a Java 8 installation
+and pass the --use-java-home flag to run-test.
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
new file mode 100755
index 0000000000..e713708c18
--- /dev/null
+++ b/test/963-default-range-smali/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/963-default-range-smali/smali/A.smali b/test/963-default-range-smali/smali/A.smali
new file mode 100644
index 0000000000..b3d91dd76b
--- /dev/null
+++ b/test/963-default-range-smali/smali/A.smali
@@ -0,0 +1,29 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LA;
+.super Ljava/lang/Object;
+.implements Liface;
+
+# class A implements iface {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
diff --git a/test/963-default-range-smali/smali/Main.smali b/test/963-default-range-smali/smali/Main.smali
new file mode 100644
index 0000000000..400fba72d9
--- /dev/null
+++ b/test/963-default-range-smali/smali/Main.smali
@@ -0,0 +1,77 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main {
+# public static void main(String[] args) {
+# A a = new A();
+# System.out.println(a.SayHi("a string 0",
+# "a string 1",
+# "a string 2",
+# "a string 3",
+# "a string 4",
+# "a string 5",
+# "a string 6",
+# "a string 7",
+# "a string 8",
+# "a string 9"));
+# iface b = (iface)a;
+# System.out.println(b.SayHi("a string 0",
+# "a string 1",
+# "a string 2",
+# "a string 3",
+# "a string 4",
+# "a string 5",
+# "a string 6",
+# "a string 7",
+# "a string 8",
+# "a string 9"));
+# }
+# }
+.class public LMain;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
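+# Ten String arguments plus the receiver cannot be encoded in a non-range
+# invoke (at most five registers), so main below must use the /range forms;
+# exercising invoke-virtual/range and invoke-interface/range against a
+# default method is the point of this test.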
+.method public static main([Ljava/lang/String;)V
+ .locals 15
+ sget-object v12, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ new-instance v1, LA;
+ invoke-direct {v1}, LA;-><init>()V
+ const-string v2, "a string 0"
+ const-string v3, "a string 1"
+ const-string v4, "a string 2"
+ const-string v5, "a string 3"
+ const-string v6, "a string 4"
+ const-string v7, "a string 5"
+ const-string v8, "a string 6"
+ const-string v9, "a string 7"
+ const-string v10, "a string 8"
+ const-string v11, "a string 9"
+ invoke-virtual/range {v1 .. v11}, LA;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface/range {v1 .. v11}, Liface;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/963-default-range-smali/smali/iface.smali b/test/963-default-range-smali/smali/iface.smali
new file mode 100644
index 0000000000..c2c3ce69a7
--- /dev/null
+++ b/test/963-default-range-smali/smali/iface.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface {
+# public default String SayHi(String n1,
+# String n2,
+# String n3,
+# String n4,
+# String n5,
+# String n6,
+# String n7,
+# String n8,
+# String n9,
+# String n0) {
+# return "Hello";
+# }
+# }
+
+.class public abstract interface Liface;
+.super Ljava/lang/Object;
+
+.method public SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hello"
+ return-object v0
+.end method
+
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
new file mode 100755
index 0000000000..deef803813
--- /dev/null
+++ b/test/964-default-iface-init-generated/build
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# Generate the smali files and expected.txt or fail
+./util-src/generate_smali.py ./smali ./expected.txt
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/964-default-iface-init-generated/expected.txt b/test/964-default-iface-init-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/964-default-iface-init-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/964-default-iface-init-generated/info.txt b/test/964-default-iface-init-generated/info.txt
new file mode 100644
index 0000000000..5805a86854
--- /dev/null
+++ b/test/964-default-iface-init-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for interface initialization.
+
+This tests that interface initialization order is correct.
+
+Obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, the smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm, we use the
+$(ANDROID_BUILD_TOP)/art/tools/extract-embedded-java script to turn the smali
+into equivalent Java using the embedded Java code.
+
+Take care when updating the generate_smali.py script: it must produce
+equivalent output every time it is run, and the expected output it
+generates must remain valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/964-default-iface-init-generated/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/964-default-iface-init-generated/smali/Displayer.smali b/test/964-default-iface-init-generated/smali/Displayer.smali
new file mode 100644
index 0000000000..91280a8a42
--- /dev/null
+++ b/test/964-default-iface-init-generated/smali/Displayer.smali
@@ -0,0 +1,45 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# // This class exists because Java does not allow static {} blocks in interfaces.
+# public class Displayer {
+# public Displayer(String type) {
+# System.out.println("initialization of " + type);
+# }
+# public void touch() {
+# return;
+# }
+# }
+
+.class public LDisplayer;
+.super Ljava/lang/Object;
+
+.method public constructor <init>(Ljava/lang/String;)V
+ .locals 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ const-string v0, "initialization of "
+ invoke-virtual {v0, p1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+
+.method public touch()V
+ .locals 0
+ return-void
+.end method
+
diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..be2d3ba563
--- /dev/null
+++ b/test/964-default-iface-init-generated/util-src/generate_smali.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 964.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the tree can have.
+MAX_IFACE_DEPTH = 3
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def get_name(self):
+ """
+ Gets the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the smali code for this test.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ test_groups = test_groups,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# try {{
+# System.out.println("About to initialize {tree}");
+# {farg} v = new {farg}();
+# System.out.println("Initialized {tree}");
+# v.touchAll();
+# System.out.println("All of {tree} hierarchy initialized");
+# return;
+# }} catch (Error e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ :call_{fname}_try_start
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "About to initialize {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ new-instance v6, L{farg};
+ invoke-direct {{v6}}, L{farg};-><init>()V
+
+ const-string v3, "Initialized {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {{v6}}, L{farg};->touchAll()V
+
+ const-string v3, "All of {tree} hierarchy initialized"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ OUTPUT_FORMAT = """
+About to initialize {tree}
+{initialize_output}
+Initialized {tree}
+{touch_output}
+All of {tree} hierarchy initialized
+""".strip()
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def __str__(self):
+ """
+ Print the smali code for this test function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(),
+ farg=self.farg.get_name(),
+ tree = self.farg.get_tree())
+
+ def get_name(self):
+ """
+ Gets the name of this test function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def get_expected(self):
+ """
+ Get the expected output of this function.
+ """
+ return self.OUTPUT_FORMAT.format(
+ tree = self.farg.get_tree(),
+ initialize_output = self.farg.get_initialize_output().strip(),
+ touch_output = self.farg.get_touch_output().strip())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test interface initialization order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public class {class_name} implements {ifaces} {{
+#
+# public {class_name}() {{
+# }}
+.method public constructor <init>()V
+ .locals 2
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+# public void marker() {{
+# return;
+# }}
+.method public marker()V
+ .locals 0
+ return-void
+.end method
+
+# public void touchAll() {{
+.method public touchAll()V
+ .locals 2
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ {touch_calls}
+ return-void
+.end method
+# }}
+# }}
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ TOUCH_CALL_TEMPLATE = """
+# System.out.println("{class_name} touching {iface_name}");
+# {iface_name}.field.touch();
+ const-string v1, "{class_name} touching {iface_name}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ sget-object v1, L{iface_name};->field:LDisplayer;
+ invoke-virtual {{v1}}, LDisplayer;->touch()V
+"""
+
+ TOUCH_OUTPUT_TEMPLATE = """
+{class_name} touching {iface_name}
+{touch_output}
+""".strip()
+
+ def __init__(self, ifaces):
+ """
+ Initialize this test class which implements the given interfaces
+ """
+ self.ifaces = ifaces
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Gets the name of this interface
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{fname} {iftree}]".format(fname = self.get_name(), iftree = print_tree(self.ifaces))
+
+ def get_initialize_output(self):
+ return "\n".join(map(lambda i: i.get_initialize_output().strip(), dump_tree(self.ifaces)))
+
+ def get_touch_output(self):
+ return "\n".join(map(lambda a: self.TOUCH_OUTPUT_TEMPLATE.format(
+ class_name = self.class_name,
+ iface_name = a.get_name(),
+ touch_output = a.get_touch_output()).strip(),
+ self.get_all_interfaces()))
+
+ def get_all_interfaces(self):
+ """
+ Returns a set of all interfaces this class transitively implements
+ """
+ return sorted(set(dump_tree(self.ifaces)))
+
+ def __str__(self):
+ """
+ Print the smali code for this class.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ touches = '\n'.join(map(lambda a: self.TOUCH_CALL_TEMPLATE.format(class_name = self.class_name,
+ iface_name = a.get_name()),
+ self.get_all_interfaces()))
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ ifaces = j_ifaces,
+ class_name = self.class_name,
+ touch_calls = touches)
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+# public static final Displayer field = new Displayer("{tree}");
+.field public final static field:LDisplayer;
+
+.method public static constructor <clinit>()V
+ .locals 3
+ const-string v2, "{tree}"
+ new-instance v1, LDisplayer;
+ invoke-direct {{v1, v2}}, LDisplayer;-><init>(Ljava/lang/String;)V
+ sput-object v1, L{class_name};->field:LDisplayer;
+ return-void
+.end method
+
+# public void marker();
+.method public abstract marker()V
+.end method
+
+{funcs}
+
+# }}
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default void {class_name}_DEFAULT_FUNC() {{
+# return;
+# }}
+.method public {class_name}_DEFAULT_FUNC()V
+ .locals 0
+ return-void
+.end method
+"""
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ OUTPUT_TEMPLATE = "initialization of {tree}"
+
+ def __init__(self, ifaces, default):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = ifaces
+ self.default = default
+ end = "_DEFAULT" if default else ""
+ self.class_name = "INTERFACE_"+gensym()+end
+ self.cloned = False
+ self.initialized = False
+
+ def clone(self):
+ """
+ Clones this interface, returning a new one with the same structure but
+ a different name.
+ """
+ return TestInterface(tuple(map(lambda a: a.clone(), self.ifaces)), self.default)
+
+ def get_name(self):
+ """
+ Gets the name of this interface
+ """
+ return self.class_name
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def get_initialize_output(self):
+ """
+ Returns the expected output upon the class that implements this interface being initialized.
+ """
+ if self.default and not self.initialized:
+ self.initialized = True
+ return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
+ else:
+ return ""
+
+ def get_touch_output(self):
+ """
+ Returns the expected output upon this interface being touched.
+ """
+ if not self.default and not self.initialized:
+ self.initialized = True
+ return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
+ else:
+ return ""
+
+ def __str__(self):
+ """
+ Print the smali code for this interface.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ if self.default:
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(class_name = self.class_name)
+ else:
+ funcs = ""
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def dump_tree(ifaces):
+ """
+ Yields all the interfaces transitively implemented by the given set, in
+ post-order: each interface's super-interfaces are yielded before it
+ """
+ for i in ifaces:
+ yield from dump_tree(i.ifaces)
+ yield i
+
+def print_tree(ifaces):
+ """
+ Prints the tree for the given ifaces.
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+def clone_all(l):
+ return tuple(a.clone() for a in l)
+
+# Cached output of subtree_sizes for speed of access.
+SUBTREES = [set(tuple(l) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_test_classes():
+ """
+ Yield all the test classes with the different interface trees
+ """
+ for num in range(1, MAX_IFACE_DEPTH + 1):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestClass(clone_all(supers))
+ for i in range(len(set(dump_tree(supers)) - set(supers))):
+ ns = clone_all(supers)
+ selected = sorted(set(dump_tree(ns)) - set(ns))[i]
+ yield TestClass(tuple([selected] + list(ns)))
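+
+# (The second yield above re-adds one indirect super-interface as a direct
+# one, producing diamond-shaped hierarchies. clone_all gives each TestClass
+# private TestInterface instances, so the 'initialized' flags used when
+# computing expected output are never shared across tests.)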
+
+def create_interface_trees(num):
+ """
+ Yield all the interface trees up to 'num' depth.
+ """
+ if num == 0:
+ yield TestInterface(tuple(), False)
+ yield TestInterface(tuple(), True)
+ return
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestInterface(clone_all(supers), False)
+ yield TestInterface(clone_all(supers), True)
+ # TODO Should add on some from higher up the tree.
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for clazz in create_test_classes():
+ classes.add(clazz)
+ for i in dump_tree(clazz.ifaces):
+ classes.add(i)
+ mc.add_test(clazz)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 537873f0f0..e114a2e9f3 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -214,13 +214,29 @@ TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
055-enum-performance \
133-static-invoke-super
- # disable timing sensitive tests on "dist" builds.
+# disable timing sensitive tests on "dist" builds.
ifdef dist_goal
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
+# Tests that require python3.
+TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
+ 960-default-smali \
+ 961-default-iface-resolution-generated \
+ 964-default-iface-init-generated \
+
+# Check if we have python3 to run our tests.
+ifeq ($(wildcard /usr/bin/python3),)
+ $(warning "No python3 found. Disabling tests: $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS)")
+
+ # Currently disable tests requiring python3 when it is not installed.
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
@@ -313,13 +329,15 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUIL
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
# 131 is an old test. The functionality has been implemented at an earlier stage and is checked
-# in tests 138.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# in test 138. Blacklisted for debug builds since these builds have duplicate classes checks
+# which punt to the interpreter.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),debug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
-# 138-duplicate-classes-check. Turned off temporarily, b/21333911.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# 138-duplicate-classes-check. Turned on for debug builds since debug builds have duplicate classes
+# checks enabled, b/21333911.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))
diff --git a/test/etc/default-build b/test/etc/default-build
index c281bca3f5..c92402b529 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -26,6 +26,8 @@ while true; do
option="$1"
DX_FLAGS="${DX_FLAGS} $option"
shift
+ elif [ "x$1" = "x--jvm" ]; then
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
diff --git a/test/run-all-tests b/test/run-all-tests
index 13490c46e4..76283b7a8d 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -41,6 +41,9 @@ while true; do
if [ "x$1" = "x--host" ]; then
run_args="${run_args} --host"
shift
+ elif [ "x$1" = "x--use-java-home" ]; then
+ run_args="${run_args} --use-java-home"
+ shift
elif [ "x$1" = "x--jvm" ]; then
run_args="${run_args} --jvm"
shift
@@ -133,7 +136,7 @@ if [ "$usage" = "yes" ]; then
echo " --debug --dev --host --interpreter --jit --jvm --no-optimize"
echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
echo " --prebuild --always-clean --gcstress --gcverify --trace"
- echo " --no-patchoat --no-dex2oat"
+ echo " --no-patchoat --no-dex2oat --use-java-home"
echo " Specific Runtime Options:"
echo " --seq Run tests one-by-one, avoiding failures caused by busy CPU"
) 1>&2
diff --git a/test/run-test b/test/run-test
index 2892ce9f18..1b71f33209 100755
--- a/test/run-test
+++ b/test/run-test
@@ -40,7 +40,6 @@ else
tmp_dir="${TMPDIR}/$USER/${test_dir}"
fi
checker="${progdir}/../tools/checker/checker.py"
-
export JAVA="java"
export JAVAC="javac -g"
export RUN="${progdir}/etc/run-test-jar"
@@ -155,6 +154,15 @@ while true; do
DEX_LOCATION=$tmp_dir
run_args="${run_args} --host"
shift
+ elif [ "x$1" = "x--use-java-home" ]; then
+ if [ -n "${JAVA_HOME}" ]; then
+ export JAVA="${JAVA_HOME}/bin/java"
+ export JAVAC="${JAVA_HOME}/bin/javac -g"
+ else
+ echo "Passed --use-java-home without JAVA_HOME variable set!"
+ usage="yes"
+ fi
+ shift
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
runtime="jvm"
@@ -162,6 +170,7 @@ while true; do
NEED_DEX="false"
USE_JACK="false"
run_args="${run_args} --jvm"
+ build_args="${build_args} --jvm"
shift
elif [ "x$1" = "x-O" ]; then
lib="libart.so"
@@ -560,6 +569,9 @@ if [ "$usage" = "yes" ]; then
echo " --invoke-with Pass --invoke-with option to runtime."
echo " --dalvik Use Dalvik (off by default)."
echo " --jvm Use a host-local RI virtual machine."
+ echo " --use-java-home Use the JAVA_HOME environment variable"
+ echo " to find the java compiler and runtime"
+ echo " (if applicable) to run the test with."
echo " --output-path [path] Location where to store the build" \
"files."
echo " --64 Run the test in 64-bit mode"
diff --git a/test/utils/python/testgen/mixins.py b/test/utils/python/testgen/mixins.py
new file mode 100644
index 0000000000..085e51def2
--- /dev/null
+++ b/test/utils/python/testgen/mixins.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common mixins and abstract base classes (ABCs) useful for writing test generators in Python.
+"""
+
+import abc
+import collections.abc
+import functools
+
+class Named(metaclass=abc.ABCMeta):
+ """
+ An abc that defines a get_name method.
+ """
+
+ @abc.abstractmethod
+ def get_name(self):
+ """
+ Returns a unique name to use as the identity for implementing comparisons.
+ """
+ pass
+
+class FileLike(metaclass=abc.ABCMeta):
+ """
+ An abc that defines get_file_name and get_file_extension methods.
+ """
+
+ @abc.abstractmethod
+ def get_file_name(self):
+ """Returns the filename this object represents"""
+ pass
+
+ @abc.abstractmethod
+ def get_file_extension(self):
+ """Returns the file extension of the file this object represents"""
+ pass
+
+@functools.lru_cache(maxsize=None)
+def get_file_extension_mixin(ext):
+ """
+ Gets a mixin that defines get_file_name(self) in terms of get_name(self) with the
+ given file extension.
+ """
+
+ class FExt(object):
+ """
+ A mixin defining get_file_name(self) in terms of get_name(self)
+ """
+
+ def get_file_name(self):
+ return self.get_name() + ext
+
+ def get_file_extension(self):
+ return ext
+
+ # Register the ABCs
+ Named.register(FExt)
+ FileLike.register(FExt)
+
+ return FExt
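+
+# Note that, because of the lru_cache, requesting the same extension twice
+# returns the same class object:
+#
+#   get_file_extension_mixin(".java") is get_file_extension_mixin(".java")
+#
+# evaluates to True.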
+
+class SmaliFileMixin(get_file_extension_mixin(".smali")):
+ """
+ A mixin specifying that the file this class represents is named get_name() + ".smali".
+ """
+ pass
+
+class NameComparableMixin(object):
+ """
+ A mixin that defines the object comparison and related functionality in terms
+ of a get_name(self) function.
+ """
+
+ def __lt__(self, other):
+ return self.get_name() < other.get_name()
+
+ def __gt__(self, other):
+ return self.get_name() > other.get_name()
+
+ def __eq__(self, other):
+ return self.get_name() == other.get_name()
+
+ def __le__(self, other):
+ return self.get_name() <= other.get_name()
+
+ def __ge__(self, other):
+ return self.get_name() >= other.get_name()
+
+ def __ne__(self, other):
+ return self.get_name() != other.get_name()
+
+ def __hash__(self):
+ return hash(self.get_name())
+
+Named.register(NameComparableMixin)
+collections.abc.Hashable.register(NameComparableMixin)
+
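+# A minimal usage sketch (the Item class is hypothetical, purely for
+# illustration): any class that provides get_name() gains ordering and
+# hashing by mixing this in.
+#
+#   class Item(NameComparableMixin):
+#     def __init__(self, name):
+#       self.name = name
+#     def get_name(self):
+#       return self.name
+#
+#   assert Item("a") < Item("b") and hash(Item("a")) == hash(Item("a"))
+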
+class DumpMixin(metaclass=abc.ABCMeta):
+ """
+ A mixin adding support for dumping the string representation of an object to a
+ file. Requires that the get_file_name(self) method be defined.
+ """
+
+ @abc.abstractmethod
+ def __str__(self):
+ """
+ Returns the data to be printed to a file by dump.
+ """
+ pass
+
+ def dump(self, directory):
+ """
+ Dump this object to a file in the given directory
+ """
+ out_file = directory / self.get_file_name()
+ if out_file.exists():
+ out_file.unlink()
+ with out_file.open('w') as out:
+ print(str(self), file=out)
+
+FileLike.register(DumpMixin)
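+
+# A sketch of how these mixins are meant to compose (the Example class is
+# hypothetical, for illustration only):
+#
+#   class Example(SmaliFileMixin, NameComparableMixin, DumpMixin):
+#     def __init__(self, name):
+#       self.name = name
+#     def get_name(self):
+#       return self.name
+#     def __str__(self):
+#       return "# smali for " + self.name
+#
+# Example("foo").dump(Path(".")) then writes the file "./foo.smali".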
diff --git a/test/utils/python/testgen/utils.py b/test/utils/python/testgen/utils.py
new file mode 100644
index 0000000000..769ad16ebe
--- /dev/null
+++ b/test/utils/python/testgen/utils.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common functions useful for writing test generators in Python.
+"""
+
+import itertools
+import os
+import string
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# An iterator that yields strings made from lowercase letters, in alphabetical
+# order: first all strings of length 1, then all of length 2, and so on.
+NAME_GEN = itertools.chain.from_iterable(
+ map(lambda n: itertools.product(string.ascii_lowercase, repeat=n),
+ itertools.count(1)))
+
+def gensym():
+ """
+ Returns a new, globally unique identifier name that is a valid Java symbol
+ on each call.
+ """
+ return ''.join(next(NAME_GEN))
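+
+# e.g. successive calls to gensym() return "a", "b", ..., "z", "aa", "ab",
+# and so on.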
+
+def filter_blanks(s):
+ """
+ Takes a string and returns the same string with all empty lines removed,
+ e.g. filter_blanks("a\n\nb\n") == "a\nb".
+ """
+ return "\n".join(a for a in s.split("\n") if a.strip() != "")
+
+def get_copyright(filetype="java"):
+ """
+ Returns the standard copyright header for the given filetype
+ """
+ if filetype == "smali":
+ return "\n".join(map(lambda a: "# " + a, get_copyright("java").split("\n")))
+ else:
+ fname = filetype + ".txt"
+ with (Path(BUILD_TOP)/"development"/"docs"/"copyright-templates"/fname).open() as template:
+ return "".join(template.readlines())
+
+def subtree_sizes(n):
+ """
+ A generator that yields tuples, each containing a possible arrangement of
+ subtree sizes for a tree with a total of 'n' leaf nodes.
+ """
+ if n == 0:
+ return
+ elif n == 1:
+ yield (0,)
+ elif n == 2:
+ yield (1, 1)
+ else:
+ for prevt in subtree_sizes(n - 1):
+ prev = list(prevt)
+ yield tuple([1] + prev)
+ for i in range(len(prev)):
+ prev[i] += 1
+ yield tuple(prev)
+ prev[i] -= 1
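+
+# e.g. list(subtree_sizes(3)) yields (1, 1, 1), (2, 1) and (1, 2). Larger
+# values of 'n' can produce duplicates, so callers should deduplicate, as in
+# set(tuple(t) for t in subtree_sizes(n)).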
+
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index a8e3884077..1083c2f42f 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -31,9 +31,6 @@ TODO:
another) How about, always sort by name?
* For long strings, limit the string length shown in the summary view to
something reasonable. Say 50 chars, then add a "..." at the end.
- * For string summaries, if the string is an offset into a bigger byte array,
- make sure to show just the part that's in the bigger byte array, not the
- entire byte array.
* For HeapTable with single heap shown, the heap name isn't centered?
* Consistently document functions.
* Should help be part of an AhatHandler, that automatically gets the menu and
@@ -72,6 +69,8 @@ Things to Test:
time.
* That we don't show the 'extra' column in the DominatedList if we are
showing all the instances.
+ * That InstanceUtils.asString properly takes into account the "offset" and
+ * "count" fields, if they are present.
Reported Issues:
* Request to be able to sort tables by size.
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index a6ac3b8765..eb9e363d8c 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -37,22 +37,6 @@ class InstanceUtils {
}
/**
- * Read the char[] value from an hprof Instance.
- * Returns null if the object can't be interpreted as a char[].
- */
- private static char[] asCharArray(Instance inst) {
- if (! (inst instanceof ArrayInstance)) {
- return null;
- }
-
- ArrayInstance array = (ArrayInstance) inst;
- if (array.getArrayType() != Type.CHAR) {
- return null;
- }
- return array.asCharArray(0, array.getValues().length);
- }
-
- /**
* Read the byte[] value from an hprof Instance.
* Returns null if the instance is not a byte array.
*/
@@ -82,8 +66,32 @@ class InstanceUtils {
if (!isInstanceOfClass(inst, "java.lang.String")) {
return null;
}
- char[] value = getCharArrayField(inst, "value");
- return (value == null) ? null : new String(value);
+
+ Object value = getField(inst, "value");
+ if (!(value instanceof ArrayInstance)) {
+ return null;
+ }
+
+ ArrayInstance chars = (ArrayInstance) value;
+ if (chars.getArrayType() != Type.CHAR) {
+ return null;
+ }
+
+ // TODO: When perflib provides a better way to get the length of the
+ // array, we should use that here.
+ int numChars = chars.getValues().length;
+ int count = getIntField(inst, "count", numChars);
+ int offset = getIntField(inst, "offset", 0);
+ int end = offset + count - 1;
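+ // e.g. offset == 3 and count == 4 cover chars[3] through chars[6] (end == 6).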
+
+ if (count == 0) {
+ return "";
+ }
+
+ if (offset >= 0 && offset < numChars && end >= 0 && end < numChars) {
+ return new String(chars.asCharArray(offset, count));
+ }
+ return null;
}
/**
@@ -175,6 +183,15 @@ class InstanceUtils {
}
/**
+ * Read an int field of an instance, returning a default value if the field
+ * was not an int or could not be read.
+ */
+ private static int getIntField(Instance inst, String fieldName, int def) {
+ Integer value = getIntField(inst, fieldName);
+ return value == null ? def : value;
+ }
+
+ /**
* Read the given field from the given instance.
* The field is assumed to be a byte[] field.
* Returns null if the field value is null, not a byte[] or could not be read.
@@ -187,14 +204,6 @@ class InstanceUtils {
return asByteArray((Instance)value);
}
- private static char[] getCharArrayField(Instance inst, String fieldName) {
- Object value = getField(inst, fieldName);
- if (!(value instanceof Instance)) {
- return null;
- }
- return asCharArray((Instance)value);
- }
-
// Return the bitmap instance associated with this object, or null if there
// is none. This works for android.graphics.Bitmap instances and their
// underlying Byte[] instances.
diff --git a/tools/extract-embedded-java b/tools/extract-embedded-java
new file mode 100755
index 0000000000..e966552af0
--- /dev/null
+++ b/tools/extract-embedded-java
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "$#" -ne "2" ]; then
+ echo "Usage: ./extract_embedded_java.sh smali_dir java_dir"
+ exit 1
+fi
+
+# Check the input and output are directories
+[[ -d "$1" ]] || exit 1
+[[ -d "$2" ]] || exit 1
+
+# For every file with the .smali extension under "$1", set $f to its basename
+# without the .smali suffix, and then:
+for f in `find "$1" -type f -name "*.smali" | xargs -n 1 -P 0 -i basename -s .smali \{\}`; do
+ # keep only the lines starting with '# ', strip the '# ' prefix, and print
+ # the result to a file ${f}.java. Do this concurrently.
+ grep "^# " "$1/${f}.smali" | sed "s:# ::" > "${2}/${f}.java" &
+done
+
+# wait for all the files to be written
+wait