-rw-r--r--  build/Android.common_build.mk | 11
-rw-r--r--  build/Android.common_path.mk | 6
-rw-r--r--  build/Android.gtest.mk | 2
-rw-r--r--  compiler/Android.mk | 2
-rw-r--r--  compiler/common_compiler_test.cc | 1
-rw-r--r--  compiler/compiler.h | 5
-rw-r--r--  compiler/dex/local_value_numbering_test.cc | 40
-rw-r--r--  compiler/dex/mir_graph.cc | 15
-rw-r--r--  compiler/dex/pass.h | 13
-rw-r--r--  compiler/dex/pass_driver.h | 3
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 33
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc | 3
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 29
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc | 51
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc | 3
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 29
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 7
-rw-r--r--  compiler/dex/quick/gen_common.cc | 8
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 36
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc | 5
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc | 23
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 94
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 36
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 3
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 3
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 4
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 9
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc | 5
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc | 12
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 72
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 13
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 24
-rw-r--r--  compiler/dex/verification_results.cc | 1
-rw-r--r--  compiler/driver/compiler_driver-inl.h | 4
-rw-r--r--  compiler/driver/compiler_driver.cc | 17
-rw-r--r--  compiler/jit/jit_compiler.cc | 189
-rw-r--r--  compiler/jit/jit_compiler.h | 17
-rw-r--r--  compiler/jni/jni_cfi_test_expected.inc | 83
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc | 2
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.cc | 5
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc | 3
-rw-r--r--  compiler/linker/arm/relative_patcher_arm_base.cc | 1
-rw-r--r--  compiler/linker/arm/relative_patcher_thumb2_test.cc | 1
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64.cc | 1
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64_test.cc | 1
-rw-r--r--  compiler/linker/relative_patcher_test.h | 1
-rw-r--r--  compiler/oat_writer.cc | 56
-rw-r--r--  compiler/oat_writer.h | 11
-rw-r--r--  compiler/optimizing/boolean_simplifier.cc | 31
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc | 27
-rw-r--r--  compiler/optimizing/builder.cc | 21
-rw-r--r--  compiler/optimizing/code_generator.cc | 15
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 136
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 59
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 4186
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 362
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 110
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 3
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 366
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 244
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 7
-rw-r--r--  compiler/optimizing/codegen_test.cc | 143
-rw-r--r--  compiler/optimizing/common_arm64.h | 4
-rw-r--r--  compiler/optimizing/constant_area_fixups_x86.cc | 132
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 2
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 2
-rw-r--r--  compiler/optimizing/gvn_test.cc | 20
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc | 7
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 34
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 167
-rw-r--r--  compiler/optimizing/induction_var_range.h | 48
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 140
-rw-r--r--  compiler/optimizing/inliner.cc | 3
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 19
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 779
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 8
-rw-r--r--  compiler/optimizing/licm_test.cc | 2
-rw-r--r--  compiler/optimizing/nodes.cc | 59
-rw-r--r--  compiler/optimizing/nodes.h | 251
-rw-r--r--  compiler/optimizing/nodes_test.cc | 15
-rw-r--r--  compiler/optimizing/nodes_x86.h | 39
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 30
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 23
-rw-r--r--  compiler/optimizing/register_allocator.cc | 9
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 27
-rw-r--r--  compiler/utils/assembler.h | 2
-rw-r--r--  compiler/utils/assembler_test.h | 48
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc | 20
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h | 5
-rw-r--r--  compiler/utils/mips64/assembler_mips64_test.cc | 88
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 24
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 103
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 21
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 69
-rw-r--r--  dex2oat/dex2oat.cc | 87
-rw-r--r--  disassembler/disassembler_mips.cc | 29
-rw-r--r--  oatdump/oatdump.cc | 29
-rw-r--r--  runtime/Android.mk | 2
-rw-r--r--  runtime/arch/arch_test.cc | 2
-rw-r--r--  runtime/arch/arm/context_arm.cc | 10
-rw-r--r--  runtime/arch/arm/context_arm.h | 2
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 105
-rw-r--r--  runtime/arch/arm64/context_arm64.cc | 10
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 2
-rw-r--r--  runtime/arch/context.h | 14
-rw-r--r--  runtime/arch/instruction_set.h | 16
-rw-r--r--  runtime/arch/mips/context_mips.cc | 9
-rw-r--r--  runtime/arch/mips/context_mips.h | 2
-rw-r--r--  runtime/arch/mips64/context_mips64.cc | 9
-rw-r--r--  runtime/arch/mips64/context_mips64.h | 2
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 69
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.S | 3
-rw-r--r--  runtime/arch/x86/context_x86.cc | 12
-rw-r--r--  runtime/arch/x86/context_x86.h | 2
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 1
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc | 10
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h | 2
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 1
-rw-r--r--  runtime/art_code.cc | 333
-rw-r--r--  runtime/art_code.h | 93
-rw-r--r--  runtime/art_method.cc | 24
-rw-r--r--  runtime/art_method.h | 6
-rw-r--r--  runtime/asm_support.h | 63
-rw-r--r--  runtime/base/allocator.cc | 6
-rw-r--r--  runtime/base/allocator.h | 7
-rw-r--r--  runtime/base/arena_allocator.h | 10
-rw-r--r--  runtime/base/arena_containers.h | 4
-rw-r--r--  runtime/base/debug_stack.h | 4
-rw-r--r--  runtime/base/hash_set.h | 39
-rw-r--r--  runtime/base/hash_set_test.cc | 18
-rw-r--r--  runtime/base/scoped_arena_allocator.h | 2
-rw-r--r--  runtime/base/scoped_arena_containers.h | 4
-rw-r--r--  runtime/base/stringpiece.h | 17
-rw-r--r--  runtime/check_reference_map_visitor.h | 8
-rw-r--r--  runtime/class_linker.cc | 151
-rw-r--r--  runtime/class_linker.h | 22
-rw-r--r--  runtime/class_table-inl.h | 3
-rw-r--r--  runtime/class_table.cc | 15
-rw-r--r--  runtime/class_table.h | 34
-rw-r--r--  runtime/debugger.cc | 6
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 48
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 6
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 6
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 21
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc | 6
-rw-r--r--  runtime/exception_test.cc | 11
-rw-r--r--  runtime/fault_handler.cc | 11
-rw-r--r--  runtime/gc/accounting/remembered_set.cc | 3
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 9
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 31
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc | 3
-rw-r--r--  runtime/gc/space/malloc_space.cc | 2
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h | 4
-rw-r--r--  runtime/gc/space/zygote_space.cc | 3
-rw-r--r--  runtime/gc/task_processor_test.cc | 3
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 6
-rw-r--r--  runtime/intern_table.cc | 8
-rw-r--r--  runtime/intern_table.h | 1
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 16
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 21
-rw-r--r--  runtime/jit/jit_code_cache.cc | 177
-rw-r--r--  runtime/jit/jit_code_cache.h | 68
-rw-r--r--  runtime/jit/jit_code_cache_test.cc | 106
-rw-r--r--  runtime/jni_env_ext.cc | 3
-rw-r--r--  runtime/jni_internal.cc | 5
-rw-r--r--  runtime/jvalue.h | 8
-rw-r--r--  runtime/leb128_test.cc | 2
-rw-r--r--  runtime/mirror/array-inl.h | 3
-rw-r--r--  runtime/mirror/class.cc | 73
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 25
-rw-r--r--  runtime/monitor.cc | 4
-rw-r--r--  runtime/monitor.h | 4
-rw-r--r--  runtime/monitor_android.cc | 4
-rw-r--r--  runtime/monitor_linux.cc | 2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 25
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 3
-rw-r--r--  runtime/oat.cc | 17
-rw-r--r--  runtime/oat.h | 25
-rw-r--r--  runtime/oat_file-inl.h | 1
-rw-r--r--  runtime/oat_file.cc | 173
-rw-r--r--  runtime/oat_file.h | 12
-rw-r--r--  runtime/oat_file_assistant.cc | 41
-rw-r--r--  runtime/oat_file_assistant.h | 4
-rw-r--r--  runtime/oat_file_assistant_test.cc | 4
-rw-r--r--  runtime/oat_file_manager.cc | 30
-rw-r--r--  runtime/oat_quick_method_header.cc | 139
-rw-r--r--  runtime/oat_quick_method_header.h | 127
-rw-r--r--  runtime/quick/quick_method_frame_info.h | 4
-rw-r--r--  runtime/quick_exception_handler.cc | 195
-rw-r--r--  runtime/quick_exception_handler.h | 6
-rw-r--r--  runtime/reflection_test.cc | 3
-rw-r--r--  runtime/runtime.cc | 19
-rw-r--r--  runtime/runtime.h | 6
-rw-r--r--  runtime/runtime_linux.cc | 2
-rw-r--r--  runtime/stack.cc | 203
-rw-r--r--  runtime/stack.h | 27
-rw-r--r--  runtime/thread.cc | 62
-rw-r--r--  runtime/thread.h | 18
-rw-r--r--  runtime/trace.cc | 29
-rw-r--r--  runtime/utils.cc | 24
-rw-r--r--  runtime/utils.h | 11
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 15
-rw-r--r--  runtime/utils/dex_cache_arrays_layout.h | 5
-rw-r--r--  runtime/verifier/register_line.cc | 126
-rw-r--r--  sigchainlib/sigchain.cc | 8
-rw-r--r--  test/004-ReferenceMap/stack_walk_refmap_jni.cc | 26
-rw-r--r--  test/079-phantom/src/Bitmap.java | 5
-rw-r--r--  test/087-gc-after-link/src/Main.java | 1
-rw-r--r--  test/088-monitor-verification/smali/NullLocks.smali | 28
-rw-r--r--  test/088-monitor-verification/src/Main.java | 2
-rw-r--r--  test/088-monitor-verification/src/TwoPath.java | 2
-rw-r--r--  test/115-native-bridge/nativebridge.cc | 31
-rw-r--r--  test/131-structural-change/expected.txt | 1
-rw-r--r--  test/131-structural-change/src/Main.java | 6
-rw-r--r--  test/450-checker-types/src/Main.java | 11
-rw-r--r--  test/454-get-vreg/get_vreg_jni.cc | 10
-rw-r--r--  test/457-regs/regs_jni.cc | 4
-rw-r--r--  test/466-get-live-vreg/get_live_vreg_jni.cc | 4
-rw-r--r--  test/539-checker-arm64-encodable-immediates/expected.txt | 0
-rw-r--r--  test/539-checker-arm64-encodable-immediates/info.txt | 2
-rw-r--r--  test/539-checker-arm64-encodable-immediates/src/Main.java | 52
-rw-r--r--  test/540-checker-rtp-bug/expected.txt | 1
-rw-r--r--  test/540-checker-rtp-bug/info.txt | 1
-rw-r--r--  test/540-checker-rtp-bug/src/Main.java | 102
-rw-r--r--  test/Android.run-test.mk | 50
-rwxr-xr-x  test/run-test | 13
-rw-r--r--  tools/ahat/Android.mk | 2
-rw-r--r--  tools/checker/common/archs.py | 2
-rw-r--r--  tools/libcore_failures.txt | 7
232 files changed, 10159 insertions, 2490 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index b50712429e..cd9d18d578 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -34,6 +34,10 @@ ART_BUILD_TARGET_DEBUG ?= true
ART_BUILD_HOST_NDEBUG ?= true
ART_BUILD_HOST_DEBUG ?= true
+# Set this to change what opt level Art is built at.
+ART_DEBUG_OPT_FLAG ?= -O2
+ART_NDEBUG_OPT_FLAG ?= -O3
+
# Enable the static builds only for checkbuilds.
ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
ART_BUILD_HOST_STATIC ?= true
@@ -110,7 +114,8 @@ ART_TARGET_CLANG := $(USE_CLANG_PLATFORM_BUILD)
else
ART_TARGET_CLANG := false
endif
-ART_TARGET_CLANG_arm :=
+# b/25130937
+ART_TARGET_CLANG_arm := false
ART_TARGET_CLANG_arm64 :=
ART_TARGET_CLANG_mips :=
ART_TARGET_CLANG_mips64 :=
@@ -319,11 +324,11 @@ endif
# Cflags for non-debug ART and ART tools.
art_non_debug_cflags := \
- -O3
+ $(ART_NDEBUG_OPT_FLAG)
# Cflags for debug ART and ART tools.
art_debug_cflags := \
- -O2 \
+ $(ART_DEBUG_OPT_FLAG) \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
-DVIXL_DEBUG \
-UNDEBUG
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index a561c5f4c5..4abd191f54 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -90,6 +90,8 @@ HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
# Classpath for Jack compilation: we only need core-libart.
-HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
-TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
+HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
+HOST_JACK_CLASSPATH := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
+TARGET_JACK_CLASSPATH := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 70c9dc1cd3..6295e1527b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -65,6 +65,7 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
+ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
@@ -203,7 +204,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/interpreter/safe_math_test.cc \
runtime/interpreter/unstarted_runtime_test.cc \
runtime/java_vm_ext_test.cc \
- runtime/jit/jit_code_cache_test.cc \
runtime/lambda/closure_test.cc \
runtime/lambda/shorty_field_type_test.cc \
runtime/leb128_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 8e3b5550fb..960134f819 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -66,6 +66,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_utils.cc \
+ optimizing/constant_area_fixups_x86.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
@@ -151,6 +152,7 @@ LIBART_COMPILER_SRC_FILES_mips := \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
jni/quick/mips/calling_convention_mips.cc \
+ optimizing/code_generator_mips.cc \
utils/mips/assembler_mips.cc \
utils/mips/managed_register_mips.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 1727657d58..58a2f96cd9 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -32,6 +32,7 @@
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "utils.h"
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 9b4dbe02e2..8788dc1950 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -77,9 +77,8 @@ class Compiler {
* information.
* @note This is used for backtrace information in generated code.
*/
- virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
- const {
- UNUSED(driver);
+ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(
+ const CompilerDriver& driver ATTRIBUTE_UNUSED) const {
return nullptr;
}
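
[Editor's note] The hunk above shows the pattern repeated throughout this change: the old code silenced unused-parameter warnings with a UNUSED(...) statement in the function body, while the new code annotates the parameter itself with ATTRIBUTE_UNUSED. A minimal sketch of the two idioms, assuming definitions along the lines of ART's runtime/base/macros.h (the exact definitions there may differ):

// Old idiom: a variadic no-op helper "uses" the ignored parameters.
template <typename... T> void UNUSED(const T&...) {}

int OldStyle(int used, int ignored) {
  UNUSED(ignored);
  return used + 1;
}

// New idiom: mark the parameter at its declaration. ATTRIBUTE_UNUSED is
// assumed here to wrap the GCC/Clang "unused" attribute.
#define ATTRIBUTE_UNUSED __attribute__((unused))

int NewStyle(int used, int ignored ATTRIBUTE_UNUSED) {
  return used + 1;
}

The annotation keeps the parameter's status visible in the signature, and unlike the body statement it cannot silently linger after the parameter becomes genuinely used.
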
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index bd00690270..f98969effd 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -797,6 +797,10 @@ TEST_F(LocalValueNumberingTest, DivZeroCheck) {
}
}
+static constexpr int64_t shift_minus_1(size_t by) {
+ return static_cast<int64_t>(static_cast<uint64_t>(INT64_C(-1)) << by);
+}
+
TEST_F(LocalValueNumberingTest, ConstWide) {
static const MIRDef mirs[] = {
// Core reg constants.
@@ -804,45 +808,45 @@ TEST_F(LocalValueNumberingTest, ConstWide) {
DEF_CONST(Instruction::CONST_WIDE_16, 2u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 4u, -1),
DEF_CONST(Instruction::CONST_WIDE_32, 6u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 8u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 8u, shift_minus_1(16)),
DEF_CONST(Instruction::CONST_WIDE_32, 10u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 12u, (1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE_32, 14u, -(1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 16u, -(1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE, 18u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 20u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 20u, shift_minus_1(32)),
DEF_CONST(Instruction::CONST_WIDE, 22u, (INT64_C(1) << 32) + 1),
DEF_CONST(Instruction::CONST_WIDE, 24u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 26u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 28u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 26u, shift_minus_1(32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 28u, shift_minus_1(32) - 1),
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 30u, 1), // Effectively 1 << 48.
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 32u, 0xffff), // Effectively -1 << 48.
DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 36u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 38u, shift_minus_1(48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 40u, shift_minus_1(48) - 1),
// FP reg constants.
DEF_CONST(Instruction::CONST_WIDE_16, 42u, 0),
DEF_CONST(Instruction::CONST_WIDE_16, 44u, 1),
DEF_CONST(Instruction::CONST_WIDE_16, 46u, -1),
DEF_CONST(Instruction::CONST_WIDE_32, 48u, 1 << 16),
- DEF_CONST(Instruction::CONST_WIDE_32, 50u, -1 << 16),
+ DEF_CONST(Instruction::CONST_WIDE_32, 50u, shift_minus_1(16)),
DEF_CONST(Instruction::CONST_WIDE_32, 52u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 54u, (1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE_32, 56u, -(1 << 16) + 1),
DEF_CONST(Instruction::CONST_WIDE_32, 58u, -(1 << 16) - 1),
DEF_CONST(Instruction::CONST_WIDE, 60u, INT64_C(1) << 32),
- DEF_CONST(Instruction::CONST_WIDE, 62u, INT64_C(-1) << 32),
+ DEF_CONST(Instruction::CONST_WIDE, 62u, shift_minus_1(32)),
DEF_CONST(Instruction::CONST_WIDE, 64u, (INT64_C(1) << 32) + 1),
DEF_CONST(Instruction::CONST_WIDE, 66u, (INT64_C(1) << 32) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 68u, (INT64_C(-1) << 32) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 70u, (INT64_C(-1) << 32) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 68u, shift_minus_1(32) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 70u, shift_minus_1(32) - 1),
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 72u, 1), // Effectively 1 << 48.
DEF_CONST(Instruction::CONST_WIDE_HIGH16, 74u, 0xffff), // Effectively -1 << 48.
DEF_CONST(Instruction::CONST_WIDE, 76u, (INT64_C(1) << 48) + 1),
DEF_CONST(Instruction::CONST_WIDE, 78u, (INT64_C(1) << 48) - 1),
- DEF_CONST(Instruction::CONST_WIDE, 80u, (INT64_C(-1) << 48) + 1),
- DEF_CONST(Instruction::CONST_WIDE, 82u, (INT64_C(-1) << 48) - 1),
+ DEF_CONST(Instruction::CONST_WIDE, 80u, shift_minus_1(48) + 1),
+ DEF_CONST(Instruction::CONST_WIDE, 82u, shift_minus_1(48) - 1),
};
PrepareMIRs(mirs);
@@ -868,7 +872,7 @@ TEST_F(LocalValueNumberingTest, Const) {
DEF_CONST(Instruction::CONST_4, 1u, 1),
DEF_CONST(Instruction::CONST_4, 2u, -1),
DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
- DEF_CONST(Instruction::CONST_16, 4u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 4u, shift_minus_1(4)),
DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
@@ -877,14 +881,14 @@ TEST_F(LocalValueNumberingTest, Const) {
DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff), // Effectively -1 << 16.
DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST, 13u, (-1 << 16) + 1),
- DEF_CONST(Instruction::CONST, 14u, (-1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 13u, shift_minus_1(16) + 1),
+ DEF_CONST(Instruction::CONST, 14u, shift_minus_1(16) - 1),
// FP reg constants.
DEF_CONST(Instruction::CONST_4, 15u, 0),
DEF_CONST(Instruction::CONST_4, 16u, 1),
DEF_CONST(Instruction::CONST_4, 17u, -1),
DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
- DEF_CONST(Instruction::CONST_16, 19u, -1 << 4),
+ DEF_CONST(Instruction::CONST_16, 19u, shift_minus_1(4)),
DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
@@ -893,8 +897,8 @@ TEST_F(LocalValueNumberingTest, Const) {
DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff), // Effectively -1 << 16.
DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
- DEF_CONST(Instruction::CONST, 28u, (-1 << 16) + 1),
- DEF_CONST(Instruction::CONST, 29u, (-1 << 16) - 1),
+ DEF_CONST(Instruction::CONST, 28u, shift_minus_1(16) + 1),
+ DEF_CONST(Instruction::CONST, 29u, shift_minus_1(16) - 1),
// null reference constant.
DEF_CONST(Instruction::CONST_4, 30u, 0),
};
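
[Editor's note] The shift_minus_1 helper introduced above exists because left-shifting a negative signed value, as in -1 << 16 or INT64_C(-1) << 32, is undefined behavior in C++11. Doing the shift on the unsigned representation and converting back is well defined (the conversion back is implementation-defined, but two's-complement on every platform ART targets). A standalone illustration of the equivalence:

#include <cstddef>
#include <cstdint>

// Shift in the unsigned domain, then convert back to signed: same bit
// pattern as the intended -1 << by, without the undefined behavior.
static constexpr int64_t shift_minus_1(size_t by) {
  return static_cast<int64_t>(static_cast<uint64_t>(INT64_C(-1)) << by);
}

static_assert(shift_minus_1(16) == -(INT64_C(1) << 16), "matches -1 << 16");
static_assert(shift_minus_1(32) == -(INT64_C(1) << 32), "matches -1 << 32");
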
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4efe4af896..b0972d98d4 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -511,9 +511,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags,
+ int width, int flags ATTRIBUTE_UNUSED,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
static_cast<int32_t>(insn->dalvikInsn.vB));
@@ -592,11 +591,15 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
}
/* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags, ArenaBitVector* try_block_addr,
- const uint16_t* code_ptr, const uint16_t* code_end,
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block,
+ MIR* insn,
+ DexOffset cur_offset,
+ int width,
+ int flags ATTRIBUTE_UNUSED,
+ ArenaBitVector* try_block_addr,
+ const uint16_t* code_ptr,
+ const uint16_t* code_end,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 0def056f4f..16414efada 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -53,10 +53,7 @@ class Pass {
* @param data the PassDataHolder.
* @return whether or not to execute the pass.
*/
- virtual bool Gate(const PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Base class says yes.
return true;
}
@@ -64,17 +61,13 @@ class Pass {
/**
* @brief Start of the pass: called before the Worker function.
*/
- virtual void Start(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
* @brief End of the pass: called after the WalkBasicBlocks function.
*/
- virtual void End(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 8762b53af4..34a6f630f1 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -125,8 +125,7 @@ class PassDriver {
* @brief Dispatch a patch.
* Gives the ability to add logic when running the patch.
*/
- virtual void DispatchPass(const Pass* pass) {
- UNUSED(pass);
+ virtual void DispatchPass(const Pass* pass ATTRIBUTE_UNUSED) {
}
/** @brief List of passes: provides the order to execute the passes.
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index db76cc6f53..b2bd6faca2 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -216,8 +216,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -239,8 +238,7 @@ void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
OpEndIT(it);
}
-void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -516,9 +514,8 @@ static const MagicTable magic_table[] = {
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -728,16 +725,19 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
return true;
}
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
UNREACHABLE();
}
@@ -1160,9 +1160,8 @@ void ArmMir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLoc
}
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
int first_bit, int second_bit) {
- UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1257,9 +1256,8 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
-void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
+void ArmMir2Lir::GenMulLong(Instruction::Code opcode ATTRIBUTE_UNUSED, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
/*
* tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
* dest = src1.lo * src2.lo;
@@ -1564,8 +1562,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
- UNUSED(flags);
+ int flags ATTRIBUTE_UNUSED) {
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
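
[Editor's note] For context on the magic_table consulted by SmallLiteralDivRem above: the reciprocal-multiply transform (Hacker's Delight, 10-4) turns signed division by a constant into a widening multiply by a precomputed "magic" number followed by shifts and a sign fix-up. A self-contained sketch for dividing by 7; the magic constant (2^34 + 5) / 7 = 0x92492493 is the standard published value, not taken from the table in this diff:

#include <cstdint>

// Computes n / 7 without a divide instruction.
int32_t Div7(int32_t n) {
  // 0x92492493 reinterpreted as a signed 32-bit value is -1840700269.
  int64_t product = INT64_C(-1840700269) * n;
  int32_t q = static_cast<int32_t>(product >> 32);  // high 32 bits of the product
  q += n;                               // needed because this magic value is negative
  q >>= 2;                              // shift amount paired with this magic value
  q += static_cast<uint32_t>(n) >> 31;  // round toward zero when n is negative
  return q;
}

The table simply stores one (magic, shift) pair per divisor; the add-n correction applies only to divisors whose magic value is negative.
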
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 5f27338e6b..355485e03b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -987,8 +987,7 @@ int ArmMir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
return count;
}
-void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 062f7aff66..c31f46b8fe 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -419,20 +419,26 @@ LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
-LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* ArmMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
UNREACHABLE();
}
@@ -1243,14 +1249,17 @@ LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
return res;
}
-LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* ArmMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm";
UNREACHABLE();
}
-LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+LIR* ArmMir2Lir::InvokeTrampoline(OpKind op,
+ RegStorage r_tgt,
+ // The address of the trampoline is already loaded into r_tgt.
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 31cf6675af..d92dea21c2 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -37,14 +37,12 @@ LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage s
return OpCondBranch(cond, target);
}
-LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
- UNUSED(ccode, guide);
+LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
UNREACHABLE();
}
-void Arm64Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void Arm64Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -188,8 +186,7 @@ void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Con
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
-void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -413,9 +410,11 @@ static const MagicTable magic_table[] = {
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -457,9 +456,11 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d
return true;
}
-bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int64_t lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -599,15 +600,17 @@ bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_d
return true;
}
-bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
UNREACHABLE();
}
@@ -626,9 +629,11 @@ RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
UNREACHABLE();
}
@@ -963,14 +968,12 @@ void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset,
dex_cache_access_insns_.push_back(ldr);
}
-LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6efa11e1fd..691bfd9edd 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -881,8 +881,7 @@ int Arm64Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* /*info*/, int /*first*/, int c
return count;
}
-void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 483231f931..58769ea9cc 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -672,22 +672,26 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
- MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
UNREACHABLE();
}
@@ -1381,14 +1385,15 @@ LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
return store;
}
-LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
- UNUSED(r_dest, r_src);
+LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* Arm64Mir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index dbcc868657..cde99b3fae 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -992,8 +992,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
}
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
- UNUSED(offset);
+void Mir2Lir::MarkBoundary(DexOffset offset ATTRIBUTE_UNUSED, const char* inst_str) {
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -1358,8 +1357,8 @@ RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
return loc;
}
-void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2a1d64425b..2b60a51e22 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -2102,15 +2102,15 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
}
/* Call out to helper assembly routine that will null check obj and then lock it. */
-void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorEnter(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
-void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorExit(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 3c5c2fe010..422d82ffa2 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -521,10 +521,9 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_fro
* kArg1 here rather than the standard GenDalvikArgs.
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, const MethodReference& target_method,
+ int state, const MethodReference& target_method ATTRIBUTE_UNUSED,
uint32_t method_idx, uintptr_t, uintptr_t,
InvokeType) {
- UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -607,10 +606,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
return state + 1;
}
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
- QuickEntrypointEnum trampoline, int state,
- const MethodReference& target_method, uint32_t method_idx) {
- UNUSED(info, method_idx);
+static int NextInvokeInsnSP(CompilationUnit* cu,
+ CallInfo* info ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline,
+ int state,
+ const MethodReference& target_method,
+ uint32_t method_idx ATTRIBUTE_UNUSED) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
@@ -1266,35 +1267,31 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
return true;
}
-bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
+bool Mir2Lir::GenInlinedReverseBits(CallInfo* info ATTRIBUTE_UNUSED, OpSize size ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, size);
return false;
}
-bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
+bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, is_min, is_double);
return false;
}
-bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedCeil(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedFloor(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRint(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedRint(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
- UNUSED(info, is_double);
+bool Mir2Lir::GenInlinedRound(CallInfo* info ATTRIBUTE_UNUSED, bool is_double ATTRIBUTE_UNUSED) {
return false;
}
@@ -1328,8 +1325,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
return true;
}
-bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8863c058a1..4a736f3d93 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -32,9 +32,10 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ const InlineMethod& special ATTRIBUTE_UNUSED) {
// TODO
- UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 45fd1a9433..52706df7a5 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -115,17 +115,17 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
}
-void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
}
@@ -254,8 +254,10 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLoc
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
- UNUSED(bb, mir, gt_bias, is_double);
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ bool gt_bias ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -288,9 +290,10 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
-bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED) {
// TODO: need Mips implementation.
- UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1099303f7d..8ca53ea228 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -279,8 +279,7 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -290,13 +289,12 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
ne_branchover->target = target_label;
}
-void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
-void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -327,39 +325,40 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- UNUSED(info, is_long, is_object);
+bool MipsMir2Lir::GenInlinedCas(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED,
+ bool is_object ATTRIBUTE_UNUSED) {
return false;
}
-bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
@@ -408,27 +407,26 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
return true;
}
-void MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
- UNUSED(reg, target);
+void MipsMir2Lir::OpPcRelLoad(RegStorage reg ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
UNREACHABLE();
}
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -462,27 +460,28 @@ LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targ
return OpCmpImmBranch(c_code, reg, 0, target);
}
-bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
+bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool MipsMir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* MipsMir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in Mips";
UNREACHABLE();
}
-void MipsMir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void MipsMir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
@@ -621,9 +620,12 @@ void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(opcode);
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode ATTRIBUTE_UNUSED,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// TODO: Implement easy div/rem?
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
@@ -855,9 +857,11 @@ void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift, int flags) {
- UNUSED(flags);
+void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_shift,
+ int flags ATTRIBUTE_UNUSED) {
if (!cu_->target64) {
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec2475a7f7..372fe2b599 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -103,18 +103,15 @@ bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
-bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
@@ -520,21 +517,26 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
-LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* MipsMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
UNREACHABLE();
}
@@ -1031,14 +1033,14 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
return store;
}
-LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* MipsMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- UNUSED(cc, target);
+LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c50246d182..8da386368b 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1411,8 +1411,7 @@ void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const
rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}
-size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNUSED(lir);
+size_t Mir2Lir::GetInstructionOffset(LIR* lir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4e3aab2f0b..a0db1e87ba 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1463,8 +1463,7 @@ class Mir2Lir {
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
- virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
- UNUSED(opcode);
+ virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode ATTRIBUTE_UNUSED) {
return InexpensiveConstantInt(value);
}
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index c2fe5538b7..6673ea8ac5 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -851,8 +851,8 @@ uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const {
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
-Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) {
- UNUSED(compilation_unit);
+Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu,
+ void* compilation_unit ATTRIBUTE_UNUSED) {
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 8ec86fa56c..d9d0434e8a 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -320,15 +320,13 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedDouble(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedSingle(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
UNREACHABLE();
}
@@ -1553,8 +1551,7 @@ int Mir2Lir::GetSRegHi(int lowSreg) {
return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}
-bool Mir2Lir::LiveOut(int s_reg) {
- UNUSED(s_reg);
+bool Mir2Lir::LiveOut(int s_reg ATTRIBUTE_UNUSED) {
// For now.
return true;
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 12523aca76..e5d3841b14 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -21,6 +21,7 @@
#include "dex/compiler_ir.h"
#include "dex/quick/mir_to_lir.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "utils.h"
#include "x86_lir.h"
@@ -1629,8 +1630,8 @@ void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr) {
- UNUSED(start_addr);
+AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn,
+ CodeOffset start_addr ATTRIBUTE_UNUSED) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 8e81746db5..b11d41caf0 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -124,17 +124,17 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
-void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
}
-void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index ecd23e9ef0..a8706c3b09 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -270,8 +270,7 @@ void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
}
-void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void X86Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -597,8 +596,10 @@ void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& sh
shift = (is_long) ? p - 64 : p - 32;
}
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
- UNUSED(rl_dest, reg_lo, lit, is_div);
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
UNREACHABLE();
}
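For context on the surrounding code: CalculateMagicAndShift above produces the multiplier/shift pair that lets division by a compile-time constant be lowered to a multiply plus shifts. A self-contained check of the idea, using the signed 32-bit constants for divisor 7 (magic 0x92492493, shift 2, taken from the standard tables; an illustration of the technique, not the ART lowering itself):

    #include <cassert>
    #include <cstdint>

    // Signed n / 7 without a divide instruction.
    int32_t DivBy7(int32_t n) {
      const int64_t kMagic = static_cast<int32_t>(0x92492493);  // assumed constant
      int32_t q = static_cast<int32_t>((kMagic * n) >> 32);     // hi(magic * n)
      q += n;                               // correction: the magic value is negative
      q >>= 2;                              // the computed shift
      q += static_cast<uint32_t>(q) >> 31;  // truncate toward zero for negative n
      return q;
    }

    int main() {
      for (int32_t n = -10000; n <= 10000; ++n) {
        assert(DivBy7(n) == n / 7);
      }
      return 0;
    }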
@@ -766,16 +767,19 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
return rl_result;
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
- bool is_div) {
- UNUSED(rl_dest, reg_lo, reg_hi, is_div);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ RegStorage reg_hi ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
UNREACHABLE();
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1449,22 +1453,21 @@ void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, R
}
}
-LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for x86";
UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1595,27 +1598,28 @@ LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe
return OpCondBranch(c_code, target);
}
-bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
+bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86";
UNREACHABLE();
}
-bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool X86Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* X86Mir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in x86";
UNREACHABLE();
}
-void X86Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void X86Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
UNREACHABLE();
}
@@ -1634,8 +1638,10 @@ void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
}
}
-void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
- UNUSED(sreg);
+void X86Mir2Lir::GenImulMemImm(RegStorage dest,
+ int sreg ATTRIBUTE_UNUSED,
+ int displacement,
+ int val) {
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2548,9 +2554,11 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
}
-RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, int shift_amount, int flags) {
- UNUSED(flags);
+RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src,
+ int shift_amount,
+ int flags ATTRIBUTE_UNUSED) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c62cd47315..75f3fef599 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -31,6 +31,7 @@
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "x86_lir.h"
namespace art {
@@ -254,8 +255,7 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
: RegStorage32FromSpecialTargetRegister_Target32[reg];
}
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- UNUSED(reg);
+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Do not use this function!!!";
UNREACHABLE();
}
@@ -861,8 +861,7 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
}
// Not used in x86(-64)
-RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
- UNUSED(trampoline);
+RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
UNREACHABLE();
}
@@ -2323,13 +2322,11 @@ void X86Mir2Lir::GenSetVector(MIR* mir) {
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index b16ae982f2..61354dfc53 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -57,8 +57,7 @@ LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
return res;
}
-bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -66,8 +65,7 @@ bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
return value == 0;
}
-bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -942,9 +940,14 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
return store;
}
-LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target, LIR** compare) {
- UNUSED(temp_reg); // Comparison performed directly with memory.
+LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond,
+ // Comparison performed directly with memory.
+ RegStorage temp_reg ATTRIBUTE_UNUSED,
+ RegStorage base_reg,
+ int offset,
+ int check_value,
+ LIR* target,
+ LIR** compare) {
LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
offset, check_value);
if (compare != nullptr) {
@@ -1114,8 +1117,11 @@ RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
return loc;
}
-LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
+LIR* X86Mir2Lir::InvokeTrampoline(OpKind op,
+ // Call to absolute memory location doesn't
+ // need a temporary target register.
+ RegStorage r_tgt ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline) {
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 6f2b2341e0..65b0ad6400 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -34,7 +34,6 @@ VerificationResults::VerificationResults(const CompilerOptions* compiler_options
verified_methods_(),
rejected_classes_lock_("compiler rejected classes lock"),
rejected_classes_() {
- UNUSED(compiler_options);
}
VerificationResults::~VerificationResults() {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index e535afd272..1a7dbe3a9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -370,7 +370,9 @@ inline int CompilerDriver::IsFastInvoke(
nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file)));
+ auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ *devirt_target->dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
called_method = class_linker->ResolveMethod(
*devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
class_loader, nullptr, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 74f19a1029..b9565846ce 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -594,7 +594,7 @@ static void CompileMethod(Thread* self,
}
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
- } else {
+ } else if (Runtime::Current()->IsAotCompiler()) {
const VerifiedMethod* verified_method =
driver->GetVerificationResults()->GetVerifiedMethod(method_ref);
bool compile = compilation_enabled &&
@@ -633,6 +633,13 @@ static void CompileMethod(Thread* self,
? dex_to_dex_compilation_level
: optimizer::DexToDexCompilationLevel::kRequired);
}
+ } else {
+ // This is for the JIT compiler, which has already ensured the class is verified.
+ // We can go straight to compiling.
+ DCHECK(Runtime::Current()->UseJit());
+ compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type,
+ class_def_idx, method_idx, class_loader,
+ dex_file, dex_cache);
}
if (kTimeCompileMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
@@ -953,7 +960,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
StackHandleScope<2> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(
+ *dex_file,
+ Runtime::Current()->GetLinearAlloc())));
Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
@@ -2010,9 +2019,11 @@ class ResolveTypeVisitor : public CompilationVisitor {
ClassLinker* class_linker = manager_->GetClassLinker();
const DexFile& dex_file = *manager_->GetDexFile();
StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(dex_file)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == nullptr) {
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b6a40a203b..3d1b42f51c 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "compiler_callbacks.h"
@@ -29,6 +30,7 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "oat_file-inl.h"
+#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "thread_list.h"
#include "verifier/method_verifier-inl.h"
@@ -86,7 +88,37 @@ JitCompiler::JitCompiler() : total_time_(0) {
/* init_failure_output */ nullptr,
/* abort_on_hard_verifier_failure */ false));
const InstructionSet instruction_set = kRuntimeISA;
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ VLOG(compiler) << "JIT compiler option " << option;
+ std::string error_msg;
+ if (option.starts_with("--instruction-set-variant=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ VLOG(compiler) << "JIT instruction set variant " << str;
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ } else if (option.starts_with("--instruction-set-features=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ VLOG(compiler) << "JIT instruction set features " << str;
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, "default", &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ }
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ }
cumulative_logger_.reset(new CumulativeLogger("jit times"));
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
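The loop above means the JIT now honors the same instruction-set flags that dex2oat accepts, falling back to the CppDefines features only when none are given. A runtime invocation along these lines would exercise both branches (the variant and feature names are illustrative; valid values depend on the target ISA, and the flags are assumed to reach Runtime's compiler options via -Xcompiler-option):

    dalvikvm -Xusejit:true \
        -Xcompiler-option --instruction-set-variant=cortex-a15 \
        -Xcompiler-option --instruction-set-features=div \
        -cp app.jar Main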
@@ -126,61 +158,66 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method) {
StackHandleScope<2> hs(self);
self->AssertNoPendingException();
Runtime* runtime = Runtime::Current();
+
+ // Check if the method is already compiled.
if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
VLOG(jit) << "Already compiled " << PrettyMethod(method);
- return true; // Already compiled
+ return true;
}
- Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
- {
- TimingLogger::ScopedTiming t2("Initializing", &logger);
- if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
- return false;
- }
+
+ // Don't compile the method if we are supposed to be deoptimized.
+ if (runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
+ return false;
}
- const DexFile* dex_file = h_class->GetDexCache()->GetDexFile();
- MethodReference method_ref(dex_file, method->GetDexMethodIndex());
- // Only verify if we don't already have verification results.
- if (verification_results_->GetVerifiedMethod(method_ref) == nullptr) {
- TimingLogger::ScopedTiming t2("Verifying", &logger);
- std::string error;
- if (verifier::MethodVerifier::VerifyMethod(method, true, &error) ==
- verifier::MethodVerifier::kHardFailure) {
- VLOG(jit) << "Not compile method " << PrettyMethod(method)
- << " due to verification failure " << error;
- return false;
- }
+
+ // Ensure the class is initialized.
+ Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+ if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+ VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
+ return false;
}
+
+ // Do the compilation.
CompiledMethod* compiled_method = nullptr;
{
TimingLogger::ScopedTiming t2("Compiling", &logger);
compiled_method = compiler_driver_->CompileArtMethod(self, method);
}
+
+ // Trim maps to reduce memory usage.
+ // TODO: measure how much this increases compile time.
{
TimingLogger::ScopedTiming t2("TrimMaps", &logger);
- // Trim maps to reduce memory usage, TODO: measure how much this increases compile time.
runtime->GetArenaPool()->TrimMaps();
}
+
+ // Check whether compilation failed.
if (compiled_method == nullptr) {
return false;
}
+
total_time_ += NanoTime() - start_time;
- // Don't add the method if we are supposed to be deoptimized.
bool result = false;
- if (!runtime->GetInstrumentation()->AreAllMethodsDeoptimized()) {
- const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
- if (code != nullptr) {
- // Already have some compiled code, just use this instead of linking.
- // TODO: Fix recompilation.
- method->SetEntryPointFromQuickCompiledCode(code);
+ const void* code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(method);
+
+ if (code != nullptr) {
+ // Already have some compiled code, just use this instead of linking.
+ // TODO: Fix recompilation.
+ method->SetEntryPointFromQuickCompiledCode(code);
+ result = true;
+ } else {
+ TimingLogger::ScopedTiming t2("LinkCode", &logger);
+ OatFile::OatMethod oat_method(nullptr, 0);
+ if (AddToCodeCache(method, compiled_method, &oat_method)) {
+ oat_method.LinkMethod(method);
+ CHECK(runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) << PrettyMethod(method);
result = true;
- } else {
- TimingLogger::ScopedTiming t2("MakeExecutable", &logger);
- result = MakeExecutable(compiled_method, method);
}
}
+
// Remove the compiled method to save memory.
- compiler_driver_->RemoveCompiledMethod(method_ref);
+ compiler_driver_->RemoveCompiledMethod(
+ MethodReference(h_class->GetDexCache()->GetDexFile(), method->GetDexMethodIndex()));
runtime->GetJit()->AddTimingLogger(logger);
return result;
}
@@ -189,41 +226,8 @@ CompilerCallbacks* JitCompiler::GetCompilerCallbacks() const {
return callbacks_.get();
}
-uint8_t* JitCompiler::WriteMethodHeaderAndCode(const CompiledMethod* compiled_method,
- uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table,
- const uint8_t* vmap_table,
- const uint8_t* gc_map) {
- reserve_begin += sizeof(OatQuickMethodHeader);
- reserve_begin = reinterpret_cast<uint8_t*>(
- compiled_method->AlignCode(reinterpret_cast<uintptr_t>(reserve_begin)));
- const auto* quick_code = compiled_method->GetQuickCode();
- CHECK_LE(reserve_begin, reserve_end);
- CHECK_LE(quick_code->size(), static_cast<size_t>(reserve_end - reserve_begin));
- auto* code_ptr = reserve_begin;
- OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
- // Construct the header last.
- const auto frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- const auto core_spill_mask = compiled_method->GetCoreSpillMask();
- const auto fp_spill_mask = compiled_method->GetFpSpillMask();
- const auto code_size = quick_code->size();
- CHECK_NE(code_size, 0U);
- std::copy(quick_code->data(), quick_code->data() + code_size, code_ptr);
- // After we are done writing we need to update the method header.
- // Write out the method header last.
- method_header = new(method_header) OatQuickMethodHeader(
- (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
- (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
- (gc_map == nullptr) ? 0 : code_ptr - gc_map,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
- code_size);
- // Return the code ptr.
- return code_ptr;
-}
-
-bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
+bool JitCompiler::AddToCodeCache(ArtMethod* method,
+ const CompiledMethod* compiled_method,
OatFile::OatMethod* out_method) {
Runtime* runtime = Runtime::Current();
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
@@ -233,7 +237,6 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
}
const auto code_size = quick_code->size();
Thread* const self = Thread::Current();
- const uint8_t* base = code_cache->CodeCachePtr();
auto* const mapping_table = compiled_method->GetMappingTable();
auto* const vmap_table = compiled_method->GetVmapTable();
auto* const gc_map = compiled_method->GetGcMap();
@@ -266,45 +269,35 @@ bool JitCompiler::AddToCodeCache(ArtMethod* method, const CompiledMethod* compil
}
}
- // Don't touch this until you protect / unprotect the code.
- const size_t reserve_size = sizeof(OatQuickMethodHeader) + quick_code->size() + 32;
- uint8_t* const code_reserve = code_cache->ReserveCode(self, reserve_size);
- if (code_reserve == nullptr) {
+ uint8_t* const code = code_cache->CommitCode(self,
+ mapping_table_ptr,
+ vmap_table_ptr,
+ gc_map_ptr,
+ compiled_method->GetFrameSizeInBytes(),
+ compiled_method->GetCoreSpillMask(),
+ compiled_method->GetFpSpillMask(),
+ compiled_method->GetQuickCode()->data(),
+ compiled_method->GetQuickCode()->size());
+
+ if (code == nullptr) {
return false;
}
- auto* code_ptr = WriteMethodHeaderAndCode(
- compiled_method, code_reserve, code_reserve + reserve_size, mapping_table_ptr,
- vmap_table_ptr, gc_map_ptr);
-
- __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
- reinterpret_cast<char*>(code_ptr + quick_code->size()));
const size_t thumb_offset = compiled_method->CodeDelta();
- const uint32_t code_offset = code_ptr - base + thumb_offset;
- *out_method = OatFile::OatMethod(base, code_offset);
+ const uint32_t code_offset = sizeof(OatQuickMethodHeader) + thumb_offset;
+ *out_method = OatFile::OatMethod(code, code_offset);
DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
- VLOG(jit) << "JIT added " << PrettyMethod(method) << "@" << method << " ccache_size="
- << PrettySize(code_cache->CodeCacheSize()) << ": " << reinterpret_cast<void*>(code_ptr)
- << "," << reinterpret_cast<void*>(code_ptr + code_size);
- return true;
-}
-
-bool JitCompiler::MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method) {
- CHECK(method != nullptr);
- CHECK(compiled_method != nullptr);
- OatFile::OatMethod oat_method(nullptr, 0);
- if (!AddToCodeCache(method, compiled_method, &oat_method)) {
- return false;
- }
- // TODO: Flush instruction cache.
- oat_method.LinkMethod(method);
- CHECK(Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method))
- << PrettyMethod(method);
+ VLOG(jit)
+ << "JIT added "
+ << PrettyMethod(method) << "@" << method
+ << " ccache_size=" << PrettySize(code_cache->CodeCacheSize()) << ": "
+ << reinterpret_cast<void*>(code + code_offset)
+ << "," << reinterpret_cast<void*>(code + code_offset + code_size);
return true;
}
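The refactoring above collapses reserve/write/flush into a single JitCodeCache::CommitCode call, with the method header written immediately in front of the instructions. A rough model of the resulting layout (the struct is a simplified stand-in for OatQuickMethodHeader, not its real definition):

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for OatQuickMethodHeader (field set assumed).
    struct FakeMethodHeader {
      uint32_t mapping_table_offset;
      uint32_t vmap_table_offset;
      uint32_t gc_map_offset;
      uint32_t frame_size_in_bytes;
      uint32_t core_spill_mask;
      uint32_t fp_spill_mask;
      uint32_t code_size;
    };

    // Given the pointer CommitCode returns, the callable entry point is the
    // header size plus any Thumb bit -- matching code_offset computed above.
    const void* EntryPoint(const uint8_t* code, size_t thumb_offset) {
      return code + sizeof(FakeMethodHeader) + thumb_offset;
    }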
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index ef68caa5fa..757f3f386a 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -39,10 +39,6 @@ class JitCompiler {
virtual ~JitCompiler();
bool CompileMethod(Thread* self, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_);
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
CompilerCallbacks* GetCompilerCallbacks() const;
size_t GetTotalCompileTime() const {
return total_time_;
@@ -58,12 +54,13 @@ class JitCompiler {
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
- explicit JitCompiler();
- uint8_t* WriteMethodHeaderAndCode(
- const CompiledMethod* compiled_method, uint8_t* reserve_begin, uint8_t* reserve_end,
- const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
- bool MakeExecutable(CompiledMethod* compiled_method, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ JitCompiler();
+
+ // This is in the compiler since the runtime doesn't have access to the compiled method
+ // structures.
+ bool AddToCodeCache(ArtMethod* method,
+ const CompiledMethod* compiled_method,
+ OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 5af2242963..16b4386938 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -394,76 +394,77 @@ static constexpr uint8_t expected_cfi_kMips[] = {
// 0x0000006c: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
- 0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
- 0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF,
- 0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF,
- 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF,
- 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF,
- 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF,
- 0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF,
- 0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF,
- 0x50, 0x00, 0xBE, 0xDF, 0x58, 0x00, 0xBF, 0xDF, 0x60, 0x00, 0xBD, 0x67,
+ 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF,
+ 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF,
+ 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF,
+ 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF,
+ 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF,
+ 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF,
+ 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF,
+ 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF,
+ 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
static constexpr uint8_t expected_cfi_kMips64[] = {
- 0x44, 0x0E, 0x60, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
+ 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E,
- 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x80, 0x01, 0x44, 0x0E,
- 0x60, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
+ 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E,
+ 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48,
- 0x0B, 0x0E, 0x60,
+ 0x0B, 0x0E, 0x70,
};
-// 0x00000000: daddiu r29, r29, -96
-// 0x00000004: .cfi_def_cfa_offset: 96
-// 0x00000004: sd r31, +88(r29)
+// 0x00000000: daddiu r29, r29, -112
+// 0x00000004: .cfi_def_cfa_offset: 112
+// 0x00000004: sd r31, +104(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r30, +80(r29)
+// 0x00000008: sd r30, +96(r29)
// 0x0000000c: .cfi_offset: r30 at cfa-16
-// 0x0000000c: sd r28, +72(r29)
+// 0x0000000c: sd r28, +88(r29)
// 0x00000010: .cfi_offset: r28 at cfa-24
-// 0x00000010: sd r23, +64(r29)
+// 0x00000010: sd r23, +80(r29)
// 0x00000014: .cfi_offset: r23 at cfa-32
-// 0x00000014: sd r22, +56(r29)
+// 0x00000014: sd r22, +72(r29)
// 0x00000018: .cfi_offset: r22 at cfa-40
-// 0x00000018: sd r21, +48(r29)
+// 0x00000018: sd r21, +64(r29)
// 0x0000001c: .cfi_offset: r21 at cfa-48
-// 0x0000001c: sd r20, +40(r29)
+// 0x0000001c: sd r20, +56(r29)
// 0x00000020: .cfi_offset: r20 at cfa-56
-// 0x00000020: sd r19, +32(r29)
+// 0x00000020: sd r19, +48(r29)
// 0x00000024: .cfi_offset: r19 at cfa-64
-// 0x00000024: sd r18, +24(r29)
+// 0x00000024: sd r18, +40(r29)
// 0x00000028: .cfi_offset: r18 at cfa-72
// 0x00000028: sd r4, +0(r29)
-// 0x0000002c: sw r5, +104(r29)
-// 0x00000030: swc1 f14, +108(r29)
-// 0x00000034: sw r7, +112(r29)
-// 0x00000038: sw r8, +116(r29)
+// 0x0000002c: sw r5, +120(r29)
+// 0x00000030: swc1 f14, +124(r29)
+// 0x00000034: sw r7, +128(r29)
+// 0x00000038: sw r8, +132(r29)
// 0x0000003c: daddiu r29, r29, -32
-// 0x00000040: .cfi_def_cfa_offset: 128
+// 0x00000040: .cfi_def_cfa_offset: 144
// 0x00000040: daddiu r29, r29, 32
-// 0x00000044: .cfi_def_cfa_offset: 96
+// 0x00000044: .cfi_def_cfa_offset: 112
// 0x00000044: .cfi_remember_state
-// 0x00000044: ld r18, +24(r29)
+// 0x00000044: ld r18, +40(r29)
// 0x00000048: .cfi_restore: r18
-// 0x00000048: ld r19, +32(r29)
+// 0x00000048: ld r19, +48(r29)
// 0x0000004c: .cfi_restore: r19
-// 0x0000004c: ld r20, +40(r29)
+// 0x0000004c: ld r20, +56(r29)
// 0x00000050: .cfi_restore: r20
-// 0x00000050: ld r21, +48(r29)
+// 0x00000050: ld r21, +64(r29)
// 0x00000054: .cfi_restore: r21
-// 0x00000054: ld r22, +56(r29)
+// 0x00000054: ld r22, +72(r29)
// 0x00000058: .cfi_restore: r22
-// 0x00000058: ld r23, +64(r29)
+// 0x00000058: ld r23, +80(r29)
// 0x0000005c: .cfi_restore: r23
-// 0x0000005c: ld r28, +72(r29)
+// 0x0000005c: ld r28, +88(r29)
// 0x00000060: .cfi_restore: r28
-// 0x00000060: ld r30, +80(r29)
+// 0x00000060: ld r30, +96(r29)
// 0x00000064: .cfi_restore: r30
-// 0x00000064: ld r31, +88(r29)
+// 0x00000064: ld r31, +104(r29)
// 0x00000068: .cfi_restore: r31
-// 0x00000068: daddiu r29, r29, 96
+// 0x00000068: daddiu r29, r29, 112
// 0x0000006c: .cfi_def_cfa_offset: 0
// 0x0000006c: jr r31
// 0x00000070: nop
// 0x00000074: .cfi_restore_state
-// 0x00000074: .cfi_def_cfa_offset: 96
+// 0x00000074: .cfi_def_cfa_offset: 112
+
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index ecf143d8f5..2d31a9881e 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -183,7 +183,7 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
}
size_t MipsJniCallingConvention::FrameSize() {
- // Method*, LR and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kMipsPointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 3a11bcfe9c..807d740b4d 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -140,6 +140,7 @@ uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
+ DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1);
return result;
}
@@ -148,9 +149,9 @@ ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
}
size_t Mips64JniCallingConvention::FrameSize() {
- // Mehtod* and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kFramePointerSize +
- CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
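The fix being made here: RA is pushed together with the callee saves but is not an element of callee_save_regs_ (the new DCHECK in CoreSpillMask encodes exactly that off-by-one), so the old formula under-counted the spill area by one slot. Roughly, with the 8-byte slots assumed for mips64:

    // old: 8 callee saves * 8 bytes       = 64-byte spill area  -> 96-byte frame
    // new: (8 saves + RA) * 8 bytes       = 72-byte spill area  -> 112-byte frame
    //      (totals rounded up to 16-byte stack alignment)

which is precisely the 96 -> 112 CFA-offset change in the updated kMips64 expected CFI data above.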
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 9c7eab1cc7..b6b11ca51f 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,8 +38,7 @@ ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
}
-static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
- UNUSED(jni);
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index ceace824ea..cb9ea38b1c 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -18,6 +18,7 @@
#include "compiled_method.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "output_stream.h"
namespace art {
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
index 13f67e6fd4..551531314a 100644
--- a/compiler/linker/arm/relative_patcher_thumb2_test.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -16,6 +16,7 @@
#include "linker/relative_patcher_test.h"
#include "linker/arm/relative_patcher_thumb2.h"
+#include "oat_quick_method_header.h"
namespace art {
namespace linker {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 6b9c530d7a..6f234a8367 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -22,6 +22,7 @@
#include "driver/compiler_driver.h"
#include "utils/arm64/assembler_arm64.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "output_stream.h"
namespace art {
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index b3af4c6a05..857d5842b5 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -16,6 +16,7 @@
#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"
+#include "oat_quick_method_header.h"
namespace art {
namespace linker {
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 31d1bced9c..e357662388 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -30,6 +30,7 @@
#include "linker/relative_patcher.h"
#include "method_reference.h"
#include "oat.h"
+#include "oat_quick_method_header.h"
#include "utils/array_ref.h"
#include "vector_output_stream.h"
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 7b410bfe37..640698b0e9 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -31,6 +31,7 @@
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "image_writer.h"
#include "linker/relative_patcher.h"
@@ -38,11 +39,13 @@
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
+#include "oat_quick_method_header.h"
#include "os.h"
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -143,6 +146,18 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
}
size_ = offset;
+ if (!HasImage()) {
+ // Allocate space for app dex cache arrays in the .bss section.
+ size_t bss_start = RoundUp(size_, kPageSize);
+ size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ bss_size_ = 0u;
+ for (const DexFile* dex_file : dex_files) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
+ }
+
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
CHECK_EQ(compiler->IsImage(), image_writer_ != nullptr);
CHECK_EQ(compiler->IsImage(),
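The .bss reservation added above is a running sum: page-align the end of the oat data, then place each dex file's dex cache arrays back to back, recording where each one starts. A small model of the computation (RoundUp and kPageSize are stand-ins, and the layout sizes stand in for DexCacheArraysLayout::Size()):

    #include <cstddef>
    #include <vector>

    constexpr size_t kPageSize = 4096;

    constexpr size_t RoundUp(size_t x, size_t n) {
      return (x + n - 1) & ~(n - 1);
    }

    // Returns each dex file's array offset (from oat begin); *bss_size gets the total.
    std::vector<size_t> LayOutBss(size_t oat_size,
                                  const std::vector<size_t>& layout_sizes,
                                  size_t* bss_size) {
      std::vector<size_t> offsets;
      size_t bss_start = RoundUp(oat_size, kPageSize);
      *bss_size = 0;
      for (size_t layout_size : layout_sizes) {
        offsets.push_back(bss_start + *bss_size);
        *bss_size += layout_size;
      }
      return offsets;
    }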
@@ -655,10 +670,10 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
no_thread_suspension_(soa_.Self(), "OatWriter patching"),
class_linker_(Runtime::Current()->GetClassLinker()),
dex_cache_(nullptr) {
- if (writer_->image_writer_ != nullptr) {
+ patched_code_.reserve(16 * KB);
+ if (writer_->HasImage()) {
// If we're creating the image, the address space must be ready so that we can apply patches.
CHECK(writer_->image_writer_->IsImageAddressSpaceReady());
- patched_code_.reserve(16 * KB);
}
}
@@ -841,24 +856,28 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
const uint8_t* oat_data = writer_->image_writer_->GetOatFileBegin() + file_offset_;
return element - oat_data;
} else {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
+ size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
+ return start + patch.TargetDexCacheElementOffset();
}
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
object = writer_->image_writer_->GetImageAddress(object);
+ } else {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the object is in the image space.
+ DCHECK(Runtime::Current()->GetHeap()->FindSpaceFromObject(object, false)->IsImageSpace());
}
+ // Note: We only patch Objects in the image, which is in the low 4gb.
uint32_t address = PointerToLowMemUInt32(object);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -870,12 +889,17 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
method = writer_->image_writer_->GetImageMethodAddress(method);
+ } else if (kIsDebugBuild) {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the method is an image method.
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
}
- // Note: We only patch ArtMethods to low 4gb since thats where the image is.
+ // Note: We only patch ArtMethods in the image, which is in the low 4gb.
uint32_t address = PointerToLowMemUInt32(method);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -887,9 +911,11 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
- PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
- writer_->oat_data_offset_ + target_offset);
+ uint32_t address = target_offset;
+ if (writer_->HasImage()) {
+ address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
+ writer_->oat_data_offset_ + target_offset);
+ }
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
data[0] = address & 0xffu;
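All three Patch*Address helpers finish the same way: the 32-bit value is stored into the code buffer a byte at a time, little-endian. Pulled out as a standalone helper for clarity (a sketch, not a proposed refactoring):

    #include <cstdint>
    #include <vector>

    // Store a 32-bit value little-endian at the given byte offset in the code.
    void PatchUint32(std::vector<uint8_t>* code, uint32_t offset, uint32_t address) {
      uint8_t* data = &(*code)[offset];
      data[0] = address & 0xffu;
      data[1] = (address >> 8) & 0xffu;
      data[2] = (address >> 16) & 0xffu;
      data[3] = (address >> 24) & 0xffu;
    }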
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 48fbc0b2ac..d6cb65bd64 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -90,6 +90,13 @@ class OatWriter {
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store);
+ // Returns whether the oat file has an associated image.
+ bool HasImage() const {
+ // Since the image is being created at the same time as the oat file,
+ // check if there's an image writer.
+ return image_writer_ != nullptr;
+ }
+
const OatHeader& GetOatHeader() const {
return *oat_header_;
}
@@ -272,6 +279,10 @@ class OatWriter {
// The size of the required .bss section holding the DexCache data.
size_t bss_size_;
+ // Offsets of the dex cache arrays for each app dex file. For the
+ // boot image, this information is provided by the ImageWriter.
+ SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; // DexFiles not owned.
+
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index 5b346872b0..f985745e7a 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -69,19 +69,17 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
if (cond->IsCondition()) {
HInstruction* lhs = cond->InputAt(0);
HInstruction* rhs = cond->InputAt(1);
- if (cond->IsEqual()) {
- return new (allocator) HNotEqual(lhs, rhs);
- } else if (cond->IsNotEqual()) {
- return new (allocator) HEqual(lhs, rhs);
- } else if (cond->IsLessThan()) {
- return new (allocator) HGreaterThanOrEqual(lhs, rhs);
- } else if (cond->IsLessThanOrEqual()) {
- return new (allocator) HGreaterThan(lhs, rhs);
- } else if (cond->IsGreaterThan()) {
- return new (allocator) HLessThanOrEqual(lhs, rhs);
- } else {
- DCHECK(cond->IsGreaterThanOrEqual());
- return new (allocator) HLessThan(lhs, rhs);
+ switch (cond->AsCondition()->GetOppositeCondition()) { // dispatch on the *opposite* condition
+ case kCondEQ: return new (allocator) HEqual(lhs, rhs);
+ case kCondNE: return new (allocator) HNotEqual(lhs, rhs);
+ case kCondLT: return new (allocator) HLessThan(lhs, rhs);
+ case kCondLE: return new (allocator) HLessThanOrEqual(lhs, rhs);
+ case kCondGT: return new (allocator) HGreaterThan(lhs, rhs);
+ case kCondGE: return new (allocator) HGreaterThanOrEqual(lhs, rhs);
+ case kCondB: return new (allocator) HBelow(lhs, rhs);
+ case kCondBE: return new (allocator) HBelowOrEqual(lhs, rhs);
+ case kCondA: return new (allocator) HAbove(lhs, rhs);
+ case kCondAE: return new (allocator) HAboveOrEqual(lhs, rhs);
}
} else if (cond->IsIntConstant()) {
HIntConstant* int_const = cond->AsIntConstant();
@@ -91,11 +89,10 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
DCHECK(int_const->IsOne());
return graph->GetIntConstant(0);
}
- } else {
- // General case when 'cond' is another instruction of type boolean,
- // as verified by SSAChecker.
- return new (allocator) HBooleanNot(cond);
}
+ // General case when 'cond' is another instruction of type boolean,
+ // as verified by SSAChecker.
+ return new (allocator) HBooleanNot(cond);
}
void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
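Two things fall out of the rewrite above: the mapping becomes a pure table driven by GetOppositeCondition, and the unsigned comparisons (kCondB/BE/A/AE, i.e. HBelow and friends) are now covered, which the old if/else chain was not. The identity being relied on, checked exhaustively over a small range (illustrative helpers, not ART code):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int32_t a = -4; a <= 4; ++a) {
        for (int32_t b = -4; b <= 4; ++b) {
          // Negating a signed comparison yields its opposite...
          assert(!(a < b) == (a >= b));
          // ...and likewise for the unsigned (Below / AboveOrEqual) pair.
          uint32_t ua = static_cast<uint32_t>(a);
          uint32_t ub = static_cast<uint32_t>(b);
          assert(!(ua < ub) == (ua >= ub));
        }
      }
      return 0;
    }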
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index ce6dc75741..c9afdf2147 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -71,9 +71,9 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -168,9 +168,9 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -232,9 +232,9 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
HInstruction* parameter1 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimNot); // array
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); // array
HInstruction* parameter2 = new (&allocator_)
- HParameterValue(0, Primitive::kPrimInt); // i
+ HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
@@ -295,7 +295,8 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_5 = graph_->GetIntConstant(5);
@@ -363,7 +364,8 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -477,7 +479,8 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -689,7 +692,8 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_initial = graph->GetIntConstant(initial);
@@ -791,7 +795,8 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(entry);
graph_->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HInstruction* constant_0 = graph_->GetIntConstant(0);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 21540e8ed7..5dd5be3259 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -159,9 +159,13 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
int locals_index = locals_.size() - number_of_parameters;
int parameter_index = 0;
+ const DexFile::MethodId& referrer_method_id =
+ dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
+ HParameterValue* parameter = new (arena_) HParameterValue(*dex_file_,
+ referrer_method_id.class_idx_,
+ parameter_index++,
Primitive::kPrimNot,
true);
entry_block_->AddInstruction(parameter);
@@ -170,11 +174,16 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
number_of_parameters--;
}
- uint32_t pos = 1;
- for (int i = 0; i < number_of_parameters; i++) {
- HParameterValue* parameter = new (arena_) HParameterValue(parameter_index++,
- Primitive::GetType(shorty[pos++]),
- false);
+ const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+ for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
+ HParameterValue* parameter = new (arena_) HParameterValue(
+ *dex_file_,
+ arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
+ parameter_index++,
+ Primitive::GetType(shorty[shorty_pos]),
+ false);
+ ++shorty_pos;
entry_block_->AddInstruction(parameter);
HLocal* local = GetLocalAt(locals_index++);
// Store the parameter value in the local that the dex code will use
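One subtlety in the loop above: shorty index 0 is the return type, while the proto's TypeList holds only the arguments, hence the shorty_pos - 1 when fetching each argument's type index. A tiny model of that off-by-one, using a hypothetical method int f(long, Object) with shorty "IJL":

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::string shorty = "IJL";  // [0] = return type, then one char per argument
      std::vector<char> arg_type_kinds = {'J', 'L'};  // proto argument list
      for (size_t shorty_pos = 1; shorty_pos < shorty.size(); ++shorty_pos) {
        // Argument i lives at shorty index i + 1.
        assert(shorty[shorty_pos] == arg_type_kinds[shorty_pos - 1]);
      }
      return 0;
    }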
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6a743ebbc9..1c62dfa859 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -32,6 +32,10 @@
#include "code_generator_x86_64.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "code_generator_mips.h"
+#endif
+
#ifdef ART_ENABLE_CODEGEN_mips64
#include "code_generator_mips64.h"
#endif
@@ -742,11 +746,12 @@ CodeGenerator* CodeGenerator::Create(HGraph* graph,
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
- UNUSED(compiler_options);
- UNUSED(graph);
- UNUSED(isa_features);
- return nullptr;
+ case kMips: {
+ return new mips::CodeGeneratorMIPS(graph,
+ *isa_features.AsMipsInstructionSetFeatures(),
+ compiler_options,
+ stats);
+ }
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8c1820b6cd..92a5878476 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -409,7 +409,7 @@ class ArraySetSlowPathARM : public SlowPathCode {
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
-inline Condition ARMSignedOrFPCondition(IfCondition cond) {
+inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
@@ -417,19 +417,30 @@ inline Condition ARMSignedOrFPCondition(IfCondition cond) {
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition.
inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
+ // Signed to unsigned.
case kCondLT: return LO;
case kCondLE: return LS;
case kCondGT: return HI;
case kCondGE: return HS;
+ // Unsigned conditions remain unchanged.
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
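Review note: the four new cases are ARM's unsigned condition codes; LO/LS/HI/HS test the carry and zero flags, while LT/LE/GT/GE test N and V. A standalone illustration of why kCondB must map to LO rather than LT, assuming two's-complement 32-bit integers:

    #include <cstdint>
    // Same bit pattern, different orderings: 0x80000000 is above 1 when
    // unsigned (HI/LO family) but below 1 when signed (GT/LT family).
    bool Below(uint32_t a, uint32_t b) { return a < b; }        // LO
    bool Less(uint32_t a, uint32_t b) {                         // LT
      return static_cast<int32_t>(a) < static_cast<int32_t>(b);
    }
    // Below(0x80000000u, 1u) is false; Less(0x80000000u, 1u) is true.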
@@ -1130,8 +1141,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
@@ -1149,12 +1159,13 @@ void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
__ vmstat(); // transfer FP status register to ARM APSR.
+ // TODO: merge into a single branch (except "equal or unordered" and "not equal")
if (cond->IsFPConditionTrueIfNaN()) {
__ b(true_label, VS); // VS for unordered.
} else if (cond->IsFPConditionFalseIfNaN()) {
__ b(false_label, VS); // VS for unordered.
}
- __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+ __ b(true_label, ARMCondition(cond->GetCondition()));
}
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
@@ -1169,10 +1180,11 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = ARMUnsignedCondition(if_cond);
+ Condition final_condition = ARMUnsignedCondition(if_cond); // Unsigned compare on the low words.
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
+ // TODO: consider avoiding jumps by using a temporary and CMP low + SBC high.
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -1190,6 +1202,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
@@ -1198,12 +1222,12 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
GenerateCompareWithImmediate(left_high, val_high);
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
GenerateCompareWithImmediate(left_low, val_low);
@@ -1213,17 +1237,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
__ cmp(left_high, ShifterOperand(right_high));
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
__ cmp(left_low, ShifterOperand(right_low));
}
// The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
__ b(true_label, final_condition);
}
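Review note: the emitted structure compares the high words first, with the signed conditions for LT/LE/GT/GE and the unsigned ones for the new B/BE/A/AE, and falls through to an unsigned compare of the low words only when the high words are equal, since the low word is a plain 32-bit magnitude either way. A scalar model of the same decision, as a sketch:

    #include <cstdint>
    // Sketch only: the signed 64-bit '<' that the jumps above implement.
    bool LongLessThan(int64_t l, int64_t r) {
      int32_t lh = static_cast<int32_t>(l >> 32);
      int32_t rh = static_cast<int32_t>(r >> 32);
      if (lh != rh) {
        return lh < rh;  // high words decide, signed compare
      }
      uint32_t ll = static_cast<uint32_t>(l);
      uint32_t rl = static_cast<uint32_t>(r);
      return ll < rl;    // equal highs: unsigned compare of the lows
    }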
@@ -1315,7 +1340,7 @@ void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instructio
DCHECK(right.IsConstant());
GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
+ __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
}
}
if (false_target != nullptr) {
@@ -1351,8 +1376,7 @@ void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
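Review note: the input of HDeoptimize can now be any boolean-valued instruction, not only an HCondition, so the DCHECK is dropped and a register is requested whenever the input is not a condition the backend can fold into flags. The predicate, modeled with a hypothetical minimal node type:

    // Sketch only; Node stands in for HInstruction.
    struct Node {
      bool is_condition;
      bool needs_materialization;
    };
    bool NeedsRegister(const Node& cond) {
      return !cond.is_condition || cond.needs_materialization;
    }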
@@ -1417,11 +1441,11 @@ void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
GenerateCompareWithImmediate(left.AsRegister<Register>(),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+ __ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMSignedOrFPCondition(cond->GetCondition()));
+ ARMCondition(cond->GetCondition()));
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+ ARMCondition(cond->GetOppositeCondition()));
return;
}
case Primitive::kPrimLong:
@@ -1500,6 +1524,38 @@ void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderARM::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1512,9 +1568,8 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -1541,8 +1596,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -1551,9 +1605,8 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
@@ -1562,9 +1615,8 @@ void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1573,9 +1625,8 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1584,9 +1635,8 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1595,9 +1645,8 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1612,8 +1661,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -1623,8 +1671,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3270,8 +3317,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4232,13 +4278,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5286,15 +5330,13 @@ Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_m
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
-void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c94da86d2c..f68b11b504 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -77,6 +77,10 @@ inline Condition ARM64Condition(IfCondition cond) {
case kCondLE: return le;
case kCondGT: return gt;
case kCondGE: return ge;
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -1326,8 +1330,7 @@ enum UnimplementedInstructionBreakCode {
};
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
- UNUSED(instr); \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -1937,7 +1940,11 @@ void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
M(LessThan) \
M(LessThanOrEqual) \
M(GreaterThan) \
- M(GreaterThanOrEqual)
+ M(GreaterThanOrEqual) \
+ M(Below) \
+ M(BelowOrEqual) \
+ M(Above) \
+ M(AboveOrEqual)
#define DEFINE_CONDITION_VISITORS(Name) \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
@@ -2175,8 +2182,8 @@ void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2184,8 +2191,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
@@ -2194,8 +2200,7 @@ void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2348,8 +2353,7 @@ void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -2682,9 +2686,8 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
@@ -2692,9 +2695,8 @@ void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
@@ -3085,9 +3087,8 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
@@ -3124,9 +3125,8 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -3393,8 +3393,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -3464,8 +3463,7 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3473,8 +3471,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
instruction->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3518,8 +3515,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -3636,9 +3632,8 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -3737,15 +3732,13 @@ void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
HandleBinaryOp(instruction);
}
-void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
new file mode 100644
index 0000000000..4404aa3289
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -0,0 +1,4186 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_mips.h"
+
+#include "arch/mips/entrypoints_direct_mips.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "art_method.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "gc/accounting/card_table.h"
+#include "intrinsics.h"
+#include "mirror/array-inl.h"
+#include "mirror/class-inl.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils/assembler.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/stack_checks.h"
+
+namespace art {
+namespace mips {
+
+static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = A0;
+
+// We need extra temporary/scratch registers (in addition to AT) in some cases.
+static constexpr Register TMP = T8;
+static constexpr FRegister FTMP = F8;
+
+// ART Thread Register.
+static constexpr Register TR = S1;
+
+Location MipsReturnLocation(Primitive::Type return_type) {
+ switch (return_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ return Location::RegisterLocation(V0);
+
+ case Primitive::kPrimLong:
+ return Location::RegisterPairLocation(V0, V1);
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ return Location::FpuRegisterLocation(F0);
+
+ case Primitive::kPrimVoid:
+ return Location();
+ }
+ UNREACHABLE();
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(Primitive::Type type) const {
+ return MipsReturnLocation(type);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
+ return Location::RegisterLocation(kMethodRegisterArgument);
+}
+
+Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ uint32_t gp_index = gp_index_++;
+ if (gp_index < calling_convention.GetNumberOfRegisters()) {
+ next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t gp_index = gp_index_;
+ gp_index_ += 2;
+ if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
+ if (calling_convention.GetRegisterAt(gp_index) == A1) {
+ gp_index_++; // Skip A1, and use A2_A3 instead.
+ gp_index++;
+ }
+ Register low_even = calling_convention.GetRegisterAt(gp_index);
+ Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
+ DCHECK_EQ(low_even + 1, high_odd);
+ next_location = Location::RegisterPairLocation(low_even, high_odd);
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Location::DoubleStackSlot(stack_offset);
+ }
+ break;
+ }
+
+ // Note: both float and double types are stored in even FPU registers. On a 32-bit FPU, a double
+ // takes up the even/odd register pair, while a float is stored in the even register only.
+ // On a 64-bit FPU, both doubles and floats are stored in even registers only.
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ uint32_t float_index = float_index_++;
+ if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
+ next_location = Location::FpuRegisterLocation(
+ calling_convention.GetFpuRegisterAt(float_index));
+ } else {
+ size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
+ next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
+ : Location::StackSlot(stack_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected parameter type " << type;
+ break;
+ }
+
+ // Space on the stack is reserved for all arguments.
+ stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
+
+ return next_location;
+}
+
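Review note: longs are passed in an aligned even/odd GPR pair, so a long that would start in A1 is bumped to the A2/A3 pair. A standalone model of the pairing rule, with indices 0..3 standing for A0..A3 (an assumption-laden sketch, not the visitor above):

    // Returns the low (even) register index of the pair, or -1 when the
    // argument registers are exhausted and a double stack slot is used.
    int AssignLongPair(int* gp_index) {
      int idx = *gp_index;
      if ((idx & 1) != 0) {
        ++idx;  // skip the odd register, e.g. A1, to get an even start
      }
      *gp_index = idx + 2;
      return (idx + 1 < 4) ? idx : -1;
    }
    // For (int, long): the int takes A0, then AssignLongPair starting
    // at index 1 skips A1 and returns 2, i.e. the A2/A3 pair.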
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
+ return MipsReturnLocation(type);
+}
+
+#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(0),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimInt,
+ locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimInt);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowArrayBounds));
+ CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; }
+
+ private:
+ HBoundsCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
+};
+
+class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowDivZero));
+ CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; }
+
+ private:
+ HDivZeroCheck* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
+};
+
+class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ LoadClassSlowPathMIPS(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
+
+ int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ bool direct = do_clinit_ ? IsDirectEntrypoint(kQuickInitializeStaticStorage)
+ : IsDirectEntrypoint(kQuickInitializeType);
+
+ mips_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this, direct);
+ if (do_clinit_) {
+ CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
+ } else {
+ CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
+ }
+
+ // Move the class to the desired location.
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ Primitive::Type type = at_->GetType();
+ mips_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS"; }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
+};
+
+class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickResolveString));
+ CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
+ Primitive::Type type = instruction_->GetType();
+ mips_codegen->MoveLocation(locations->Out(),
+ calling_convention.GetReturnLocation(type),
+ type);
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
+};
+
+class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ if (instruction_->CanThrowIntoCatchBlock()) {
+ // Live registers will be restored in the catch block if caught.
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ }
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickThrowNullPointer));
+ CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
+ }
+
+ bool IsFatal() const OVERRIDE { return true; }
+
+ const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
+};
+
+class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
+ instruction_,
+ instruction_->GetDexPc(),
+ this,
+ IsDirectEntrypoint(kQuickTestSuspend));
+ CheckEntrypointTypes<kQuickTestSuspend, void, void>();
+ RestoreLiveRegisters(codegen, instruction_->GetLocations());
+ if (successor_ == nullptr) {
+ __ B(GetReturnLabel());
+ } else {
+ __ B(mips_codegen->GetLabelOf(successor_));
+ }
+ }
+
+ MipsLabel* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ MipsLabel return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
+};
+
+class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
+ uint32_t dex_pc = instruction_->GetDexPc();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Primitive::kPrimNot,
+ object_class,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ Primitive::kPrimNot);
+
+ if (instruction_->IsInstanceOf()) {
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickInstanceofNonTrivial));
+ Primitive::Type ret_type = instruction_->GetType();
+ Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+ mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+ CheckEntrypointTypes<kQuickInstanceofNonTrivial,
+ uint32_t,
+ const mirror::Class*,
+ const mirror::Class*>();
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickCheckCast));
+ CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
+ }
+
+ RestoreLiveRegisters(codegen, locations);
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
+};
+
+class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
+ public:
+ explicit DeoptimizationSlowPathMIPS(HInstruction* instruction)
+ : instruction_(instruction) {}
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ SaveLiveRegisters(codegen, instruction_->GetLocations());
+ DCHECK(instruction_->IsDeoptimize());
+ HDeoptimize* deoptimize = instruction_->AsDeoptimize();
+ uint32_t dex_pc = deoptimize->GetDexPc();
+ CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
+ mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
+ instruction_,
+ dex_pc,
+ this,
+ IsDirectEntrypoint(kQuickDeoptimize));
+ }
+
+ const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
+
+ private:
+ HInstruction* const instruction_;
+ DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
+};
+
+CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
+ : CodeGenerator(graph,
+ kNumberOfCoreRegisters,
+ kNumberOfFRegisters,
+ kNumberOfRegisterPairs,
+ ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
+ arraysize(kCoreCalleeSaves)),
+ ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
+ arraysize(kFpuCalleeSaves)),
+ compiler_options,
+ stats),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this),
+ assembler_(&isa_features),
+ isa_features_(isa_features) {
+ // Save RA (containing the return address) to mimic Quick.
+ AddAllocatedRegister(Location::RegisterLocation(RA));
+}
+
+#undef __
+#define __ down_cast<MipsAssembler*>(GetAssembler())->
+#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, x).Int32Value()
+
+void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
+ // Ensure that we fix up branches.
+ __ FinalizeCode();
+
+ // Adjust native pc offsets in stack maps.
+ for (size_t i = 0, num = stack_map_stream_.GetNumberOfStackMaps(); i != num; ++i) {
+ uint32_t old_position = stack_map_stream_.GetStackMap(i).native_pc_offset;
+ uint32_t new_position = __ GetAdjustedPosition(old_position);
+ DCHECK_GE(new_position, old_position);
+ stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
+ }
+
+ // Adjust pc offsets for the disassembly information.
+ if (disasm_info_ != nullptr) {
+ GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
+ frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
+ frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
+ for (auto& it : *disasm_info_->GetInstructionIntervals()) {
+ it.second.start = __ GetAdjustedPosition(it.second.start);
+ it.second.end = __ GetAdjustedPosition(it.second.end);
+ }
+ for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
+ it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
+ it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
+ }
+ }
+
+ CodeGenerator::Finalize(allocator);
+}
+
+MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
+void ParallelMoveResolverMIPS::EmitMove(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
+}
+
+void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
+ DCHECK_LT(index, moves_.size());
+ MoveOperands* move = moves_[index];
+ Primitive::Type type = move->GetType();
+ Location loc1 = move->GetDestination();
+ Location loc2 = move->GetSource();
+
+ DCHECK(!loc1.IsConstant());
+ DCHECK(!loc2.IsConstant());
+
+ if (loc1.Equals(loc2)) {
+ return;
+ }
+
+ if (loc1.IsRegister() && loc2.IsRegister()) {
+ // Swap 2 GPRs.
+ Register r1 = loc1.AsRegister<Register>();
+ Register r2 = loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
+ FRegister f1 = loc1.AsFpuRegister<FRegister>();
+ FRegister f2 = loc2.AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(FTMP, f2);
+ __ MovS(f2, f1);
+ __ MovS(f1, FTMP);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ __ MovD(FTMP, f2);
+ __ MovD(f2, f1);
+ __ MovD(f1, FTMP);
+ }
+ } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegister())) {
+ // Swap FPR and GPR.
+ DCHECK_EQ(type, Primitive::kPrimFloat); // Can only swap a float.
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>()
+ : loc2.AsRegister<Register>();
+ __ Move(TMP, r2);
+ __ Mfc1(r2, f1);
+ __ Mtc1(TMP, f1);
+ } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
+ // Swap 2 GPR register pairs.
+ Register r1 = loc1.AsRegisterPairLow<Register>();
+ Register r2 = loc2.AsRegisterPairLow<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ r1 = loc1.AsRegisterPairHigh<Register>();
+ r2 = loc2.AsRegisterPairHigh<Register>();
+ __ Move(TMP, r2);
+ __ Move(r2, r1);
+ __ Move(r1, TMP);
+ } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
+ (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
+ // Swap FPR and GPR register pair.
+ DCHECK_EQ(type, Primitive::kPrimDouble);
+ FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
+ : loc2.AsFpuRegister<FRegister>();
+ Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
+ : loc2.AsRegisterPairLow<Register>();
+ Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
+ : loc2.AsRegisterPairHigh<Register>();
+ // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
+ // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
+ // unpredictable and the following mfhc1 will fail.
+ __ Mfc1(TMP, f1);
+ __ Mfhc1(AT, f1);
+ __ Mtc1(r2_l, f1);
+ __ Mthc1(r2_h, f1);
+ __ Move(r2_l, TMP);
+ __ Move(r2_h, AT);
+ } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ } else {
+ LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
+ }
+}
+
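Review note on the FPR/GPR-pair case above: both 32-bit halves must be read out of the FPR before either half is written back, since the mtc1 write makes the high half unreadable on a 64-bit FPU. The same data movement in scalar form, as a sketch:

    #include <cstdint>
    // tmp_lo and tmp_hi play the roles of TMP and AT in EmitSwap.
    void SwapPairWithDouble(uint32_t* lo, uint32_t* hi, uint64_t* fpr) {
      uint32_t tmp_lo = static_cast<uint32_t>(*fpr);        // mfc1
      uint32_t tmp_hi = static_cast<uint32_t>(*fpr >> 32);  // mfhc1
      *fpr = (static_cast<uint64_t>(*hi) << 32) | *lo;      // mtc1/mthc1
      *lo = tmp_lo;
      *hi = tmp_hi;
    }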
+void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
+ __ Pop(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::SpillScratch(int reg) {
+ __ Push(static_cast<Register>(reg));
+}
+
+void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
+ // Allocate a scratch register other than TMP, if available.
+ // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
+ // automatically unspilled when the scratch scope object is destroyed).
+ ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
+ // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
+ int stack_offset = ensure_scratch.IsSpilled() ? kMipsWordSize : 0;
+ for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
+ __ LoadFromOffset(kLoadWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index1 + stack_offset);
+ __ LoadFromOffset(kLoadWord,
+ TMP,
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord,
+ Register(ensure_scratch.GetRegister()),
+ SP,
+ index2 + stack_offset);
+ __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
+ }
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::MipsCore(static_cast<int>(reg));
+}
+
+// TODO: mapping of floating-point registers to DWARF.
+
+void CodeGeneratorMIPS::GenerateFrameEntry() {
+ __ Bind(&frame_entry_label_);
+
+ bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips) || !IsLeafMethod();
+
+ if (do_overflow_check) {
+ __ LoadFromOffset(kLoadWord,
+ ZERO,
+ SP,
+ -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips)));
+ RecordPcInfo(nullptr, 0);
+ }
+
+ if (HasEmptyFrame()) {
+ return;
+ }
+
+ // Make sure the frame size isn't unreasonably large.
+ if (GetFrameSize() > GetStackOverflowReservedBytes(kMips)) {
+ LOG(FATAL) << "Stack frame larger than " << GetStackOverflowReservedBytes(kMips) << " bytes";
+ }
+
+ // Spill callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two, or remove this comment.
+
+ uint32_t ofs = FrameEntrySpillSize();
+ bool unaligned_float = ofs & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+ __ IncreaseFrameSize(ofs);
+
+ for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ ofs -= kMipsWordSize;
+ __ Sw(reg, SP, ofs);
+ __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ ofs -= kMipsDoublewordSize;
+ // TODO: Change the frame to avoid unaligned accesses for fpu registers.
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Swc1(reg, SP, ofs);
+ __ Swc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Mfhc1(TMP, reg);
+ __ Swc1(reg, SP, ofs);
+ __ Sw(TMP, SP, ofs + 4);
+ }
+ } else {
+ __ Sdc1(reg, SP, ofs);
+ }
+ // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
+ }
+ }
+
+ // Allocate the rest of the frame and store the current method pointer
+ // at its end.
+
+ __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ static_assert(IsInt<16>(kCurrentMethodStackOffset),
+ "kCurrentMethodStackOffset must fit into int16_t");
+ __ Sw(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
+}
+
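Review note: the overflow check at the top of the frame entry is an eager probe; loading from SP minus the reserved bytes touches the guard region before the frame is built, so an overflow faults at a pc that RecordPcInfo has mapped, rather than at an arbitrary later store. A model of the probe, with sp and reserved as stand-ins:

    #include <cstddef>
    #include <cstdint>
    // Sketch only: the volatile read mirrors the kLoadWord from
    // SP - GetStackOverflowReservedBytes(kMips) above.
    void ProbeStack(const volatile uint8_t* sp, size_t reserved) {
      (void)sp[-static_cast<ptrdiff_t>(reserved)];  // faults on overflow
    }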
+void CodeGeneratorMIPS::GenerateFrameExit() {
+ __ cfi().RememberState();
+
+ if (!HasEmptyFrame()) {
+ // Deallocate the rest of the frame.
+
+ __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
+
+ // Restore callee-saved registers.
+ // Note that their cumulative size is small and they can be indexed using
+ // 16-bit offsets.
+
+ // TODO: increment/decrement SP in one step instead of two, or remove this comment.
+
+ uint32_t ofs = 0;
+ bool unaligned_float = FrameEntrySpillSize() & 0x7;
+ bool fpu_32bit = isa_features_.Is32BitFloatingPoint();
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ FRegister reg = kFpuCalleeSaves[i];
+ if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
+ if (unaligned_float) {
+ if (fpu_32bit) {
+ __ Lwc1(reg, SP, ofs);
+ __ Lwc1(static_cast<FRegister>(reg + 1), SP, ofs + 4);
+ } else {
+ __ Lwc1(reg, SP, ofs);
+ __ Lw(TMP, SP, ofs + 4);
+ __ Mthc1(TMP, reg);
+ }
+ } else {
+ __ Ldc1(reg, SP, ofs);
+ }
+ ofs += kMipsDoublewordSize;
+ // TODO: __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ Register reg = kCoreCalleeSaves[i];
+ if (allocated_registers_.ContainsCoreRegister(reg)) {
+ __ Lw(reg, SP, ofs);
+ ofs += kMipsWordSize;
+ __ cfi().Restore(DWARFReg(reg));
+ }
+ }
+
+ DCHECK_EQ(ofs, FrameEntrySpillSize());
+ __ DecreaseFrameSize(ofs);
+ }
+
+ __ Jr(RA);
+ __ Nop();
+
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(GetFrameSize());
+}
+
+void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorMIPS::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (src.Equals(dst)) {
+ return;
+ }
+
+ if (src.IsConstant()) {
+ MoveConstant(dst, src.GetConstant());
+ } else {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move32(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegister()) {
+ if (source.IsRegister()) {
+ __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
+ } else if (source.IsFpuRegister()) {
+ __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegister()) {
+ __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
+ } else if (source.IsFpuRegister()) {
+ __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsStackSlot()) << destination;
+ if (source.IsRegister()) {
+ __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, destination.GetStackIndex());
+ } else if (source.IsFpuRegister()) {
+ __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
+ } else {
+ DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::Move64(Location destination, Location source) {
+ if (source.Equals(destination)) {
+ return;
+ }
+
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
+ __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ } else if (source.IsFpuRegister()) {
+ Register dst_high = destination.AsRegisterPairHigh<Register>();
+ Register dst_low = destination.AsRegisterPairLow<Register>();
+ FRegister src = source.AsFpuRegister<FRegister>();
+ __ Mfc1(dst_low, src);
+ __ Mfhc1(dst_high, src);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ int32_t off = source.GetStackIndex();
+ Register r = destination.AsRegisterPairLow<Register>();
+ __ LoadFromOffset(kLoadDoubleword, r, SP, off);
+ }
+ } else if (destination.IsFpuRegister()) {
+ if (source.IsRegisterPair()) {
+ FRegister dst = destination.AsFpuRegister<FRegister>();
+ Register src_high = source.AsRegisterPairHigh<Register>();
+ Register src_low = source.AsRegisterPairLow<Register>();
+ __ Mtc1(src_low, dst);
+ __ Mthc1(src_high, dst);
+ } else if (source.IsFpuRegister()) {
+ __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
+ }
+ } else {
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
+ int32_t off = destination.GetStackIndex();
+ if (source.IsRegisterPair()) {
+ __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, off);
+ } else if (source.IsFpuRegister()) {
+ __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, off);
+ } else {
+ DCHECK(source.IsDoubleStackSlot()) << "Cannot move from " << source << " to " << destination;
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, TMP, SP, off);
+ __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
+ __ StoreToOffset(kStoreWord, TMP, SP, off + 4);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
+ if (c->IsIntConstant() || c->IsNullConstant()) {
+ // Move 32 bit constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsRegister()) {
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsLongConstant()) {
+ // Move 64 bit constant.
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsRegisterPair()) {
+ Register r_h = destination.AsRegisterPairHigh<Register>();
+ Register r_l = destination.AsRegisterPairLow<Register>();
+ __ LoadConst64(r_h, r_l, value);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else if (c->IsFloatConstant()) {
+ // Move 32 bit float constant.
+ int32_t value = GetInt32ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
+ } else {
+ DCHECK(destination.IsStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ } else {
+ // Move 64 bit double constant.
+ DCHECK(c->IsDoubleConstant()) << c->DebugName();
+ int64_t value = GetInt64ValueOf(c);
+ if (destination.IsFpuRegister()) {
+ FRegister fd = destination.AsFpuRegister<FRegister>();
+ __ LoadDConst64(fd, value, TMP);
+ } else {
+ DCHECK(destination.IsDoubleStackSlot())
+ << "Cannot move " << c->DebugName() << " to " << destination;
+ __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ }
+ }
+}
+
+void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
+ DCHECK(destination.IsRegister());
+ Register dst = destination.AsRegister<Register>();
+ __ LoadConst32(dst, value);
+}
+
+void CodeGeneratorMIPS::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type type = instruction->GetType();
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (instruction->IsCurrentMethod()) {
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ } else if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
+ MoveConstant(location, instruction->AsConstant());
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, Location::DoubleStackSlot(stack_slot));
+ } else {
+ Move32(location, Location::StackSlot(stack_slot));
+ }
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ if (Primitive::Is64BitType(type)) {
+ Move64(location, locations->Out());
+ } else {
+ Move32(location, locations->Out());
+ }
+ }
+}
+
+void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
+Location CodeGeneratorMIPS::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorMIPS::MarkGCCard(Register object, Register value) {
+ MipsLabel done;
+ Register card = AT;
+ Register temp = TMP;
+ __ Beqz(value, &done);
+ __ LoadFromOffset(kLoadWord,
+ card,
+ TR,
+ Thread::CardTableOffset<kMipsWordSize>().Int32Value());
+ __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Addu(temp, card, temp);
+ __ Sb(card, temp, 0);
+ __ Bind(&done);
+}
+
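Review note: MarkGCCard is the standard card-table write barrier. The value loaded from the thread is a biased base chosen so that base + (object >> kCardShift) addresses the card covering the object, and storing the base's low byte is what dirties the card. The arithmetic in standalone form, with the shift kept symbolic since its value belongs to CardTable:

    #include <cstdint>
    // Sketch only: biased_base corresponds to the word loaded from
    // Thread::CardTableOffset, addr to the object register.
    void MarkCard(uint8_t* biased_base, uintptr_t addr, unsigned shift) {
      uint8_t* card = biased_base + (addr >> shift);
      *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_base));
    }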
+void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+ // Don't allocate the Dalvik-style register pair used for argument passing.
+ blocked_register_pairs_[A1_A2] = true;
+
+ // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
+ blocked_core_registers_[ZERO] = true;
+ blocked_core_registers_[K0] = true;
+ blocked_core_registers_[K1] = true;
+ blocked_core_registers_[GP] = true;
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[RA] = true;
+
+ // AT and TMP (T8) are used as temporary/scratch registers
+ // (similar to how AT is used by MIPS assemblers).
+ blocked_core_registers_[AT] = true;
+ blocked_core_registers_[TMP] = true;
+ blocked_fpu_registers_[FTMP] = true;
+
+ // Reserve suspend and thread registers.
+ blocked_core_registers_[S0] = true;
+ blocked_core_registers_[TR] = true;
+
+ // Reserve T9 for function calls.
+ blocked_core_registers_[T9] = true;
+
+ // Reserve odd-numbered FPU registers so that doubles, which occupy an
+ // even/odd register pair, can always be assigned the even register.
+ for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
+ blocked_fpu_registers_[i] = true;
+ }
+
+ if (is_baseline) {
+ for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
+ blocked_core_registers_[kCoreCalleeSaves[i]] = true;
+ }
+
+ for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
+ blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
+ }
+ }
+
+ UpdateBlockedPairRegisters();
+}
+
+void CodeGeneratorMIPS::UpdateBlockedPairRegisters() const {
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (blocked_core_registers_[current.AsRegisterPairLow()]
+ || blocked_core_registers_[current.AsRegisterPairHigh()]) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+}
+
+Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
+ switch (type) {
+ case Primitive::kPrimLong: {
+ size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
+ MipsManagedRegister pair =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
+ DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
+
+ blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+ blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
+ UpdateBlockedPairRegisters();
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
+ }
+
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
+ // Block all register pairs that contain `reg`.
+ for (int i = 0; i < kNumberOfRegisterPairs; i++) {
+ MipsManagedRegister current =
+ MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
+ if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
+ blocked_register_pairs_[i] = true;
+ }
+ }
+ return Location::RegisterLocation(reg);
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
+ return Location::FpuRegisterLocation(reg);
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ UNREACHABLE();
+}
+
+size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
+ return kMipsWordSize;
+}
+
+size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
+ return kMipsDoublewordSize;
+}
+
+void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromCoreRegister(Register(reg));
+}
+
+void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << MipsManagedRegister::FromFRegister(FRegister(reg));
+}
+
+void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) {
+ InvokeRuntime(GetThreadOffset<kMipsWordSize>(entrypoint).Int32Value(),
+ instruction,
+ dex_pc,
+ slow_path,
+ IsDirectEntrypoint(entrypoint));
+}
+
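+// Stack space for the four argument words ($a0-$a3) that the MIPS o32
+// calling convention expects callers to reserve for the callee.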
+constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
+
+void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint) {
+ if (is_direct_entrypoint) {
+ // Reserve stack space for the $a0-$a3 argument registers when calling
+ // entrypoints that directly reference native implementations; the
+ // called function may use this space to home its register arguments.
+ __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
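+ // Per the MIPS PIC calling convention, the callee's address is passed
+ // in T9, which the callee can use to compute its own $gp.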
+ __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
+ __ Jalr(T9);
+ __ Nop();
+ if (is_direct_entrypoint) {
+ __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
+ }
+ RecordPcInfo(instruction, dex_pc, slow_path);
+}
+
+void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
+ Register class_reg) {
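+ // The class is initialized iff its status is at least
+ // kStatusInitialized; branch to the slow path for anything below that.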
+ __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
+ __ LoadConst32(AT, mirror::Class::kStatusInitialized);
+ __ Blt(TMP, AT, slow_path->GetEntryLabel());
+ // Even if the initialized flag is set, we need to ensure consistent memory ordering.
+ __ Sync(0);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
+ __ Sync(0); // Only stype 0 (a full barrier) is supported, regardless of the requested kind.
+}
+
+void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
+ HBasicBlock* successor) {
+ SuspendCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS(instruction, successor);
+ codegen_->AddSlowPath(slow_path);
+
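+ // Test the thread's flags halfword; any pending flag (e.g. a suspend
+ // request) sends us to the slow path.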
+ __ LoadFromOffset(kLoadUnsignedHalfword,
+ TMP,
+ TR,
+ Thread::ThreadFlagsOffset<kMipsWordSize>().Int32Value());
+ if (successor == nullptr) {
+ __ Bnez(TMP, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetReturnLabel());
+ } else {
+ __ Beqz(TMP, codegen_->GetLabelOf(successor));
+ __ B(slow_path->GetEntryLabel());
+ // slow_path will return to GetLabelOf(successor).
+ }
+}
+
+InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
+ CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ DCHECK_EQ(instruction->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type type = instruction->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ HInstruction* right = instruction->InputAt(1);
+ bool can_use_imm = false;
+ if (right->IsConstant()) {
+ int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
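+ // andi/ori/xori zero-extend their 16-bit immediate, while addiu
+ // sign-extends; a subtraction uses addiu with the negated immediate.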
+ if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
+ can_use_imm = IsUint<16>(imm);
+ } else if (instruction->IsAdd()) {
+ can_use_imm = IsInt<16>(imm);
+ } else {
+ DCHECK(instruction->IsSub());
+ can_use_imm = IsInt<16>(-imm);
+ }
+ }
+ if (can_use_imm)
+ locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+ else
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->IsAdd() || instruction->IsSub()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ } else {
+ DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ DCHECK(instruction->IsAdd() || instruction->IsSub());
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ if (instruction->IsAnd()) {
+ if (use_imm)
+ __ Andi(dst, lhs, rhs_imm);
+ else
+ __ And(dst, lhs, rhs_reg);
+ } else if (instruction->IsOr()) {
+ if (use_imm)
+ __ Ori(dst, lhs, rhs_imm);
+ else
+ __ Or(dst, lhs, rhs_reg);
+ } else if (instruction->IsXor()) {
+ if (use_imm)
+ __ Xori(dst, lhs, rhs_imm);
+ else
+ __ Xor(dst, lhs, rhs_reg);
+ } else if (instruction->IsAdd()) {
+ if (use_imm)
+ __ Addiu(dst, lhs, rhs_imm);
+ else
+ __ Addu(dst, lhs, rhs_reg);
+ } else {
+ DCHECK(instruction->IsSub());
+ if (use_imm)
+ __ Addiu(dst, lhs, -rhs_imm);
+ else
+ __ Subu(dst, lhs, rhs_reg);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ // TODO: can 2nd param be const?
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+ if (instruction->IsAnd()) {
+ __ And(dst_low, lhs_low, rhs_low);
+ __ And(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsOr()) {
+ __ Or(dst_low, lhs_low, rhs_low);
+ __ Or(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsXor()) {
+ __ Xor(dst_low, lhs_low, rhs_low);
+ __ Xor(dst_high, lhs_high, rhs_high);
+ } else if (instruction->IsAdd()) {
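+ // Carry out of the low word: after dst_low = lhs_low + rhs_low, the
+ // addition wrapped iff dst_low < lhs_low (unsigned).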
+ __ Addu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, dst_low, lhs_low);
+ __ Addu(dst_high, lhs_high, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ } else {
+ DCHECK(instruction->IsSub());
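+ // Borrow out of the low word: after dst_low = lhs_low - rhs_low, the
+ // subtraction underflowed iff lhs_low < dst_low (unsigned).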
+ __ Subu(dst_low, lhs_low, rhs_low);
+ __ Sltu(TMP, lhs_low, dst_low);
+ __ Subu(dst_high, lhs_high, rhs_high);
+ __ Subu(dst_high, dst_high, TMP);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (instruction->IsAdd()) {
+ if (type == Primitive::kPrimFloat) {
+ __ AddS(dst, lhs, rhs);
+ } else {
+ __ AddD(dst, lhs, rhs);
+ }
+ } else {
+ DCHECK(instruction->IsSub());
+ if (type == Primitive::kPrimFloat) {
+ __ SubS(dst, lhs, rhs);
+ } else {
+ __ SubD(dst, lhs, rhs);
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected binary operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected shift type " << type;
+ }
+}
+
+static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
+
+void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
+ DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
+ LocationSummary* locations = instr->GetLocations();
+ Primitive::Type type = instr->GetType();
+
+ Location rhs_location = locations->InAt(1);
+ bool use_imm = rhs_location.IsConstant();
+ Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
+ int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
+ uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue;
+ uint32_t shift_value = rhs_imm & shift_mask;
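+ // Per the Java language specification, shift distances use only the low
+ // five bits for int shifts and the low six bits for long shifts.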
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ if (use_imm) {
+ if (instr->IsShl()) {
+ __ Sll(dst, lhs, shift_value);
+ } else if (instr->IsShr()) {
+ __ Sra(dst, lhs, shift_value);
+ } else {
+ __ Srl(dst, lhs, shift_value);
+ }
+ } else {
+ if (instr->IsShl()) {
+ __ Sllv(dst, lhs, rhs_reg);
+ } else if (instr->IsShr()) {
+ __ Srav(dst, lhs, rhs_reg);
+ } else {
+ __ Srlv(dst, lhs, rhs_reg);
+ }
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ if (use_imm) {
+ if (shift_value == 0) {
+ codegen_->Move64(locations->Out(), locations->InAt(0));
+ } else if (shift_value < kMipsBitsPerWord) {
+ if (instr->IsShl()) {
+ __ Sll(dst_low, lhs_low, shift_value);
+ __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
+ __ Sll(dst_high, lhs_high, shift_value);
+ __ Or(dst_high, dst_high, TMP);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ } else {
+ __ Srl(dst_high, lhs_high, shift_value);
+ __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
+ __ Srl(dst_low, lhs_low, shift_value);
+ __ Or(dst_low, dst_low, TMP);
+ }
+ } else {
+ shift_value -= kMipsBitsPerWord;
+ if (instr->IsShl()) {
+ __ Sll(dst_high, lhs_low, shift_value);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Sra(dst_low, lhs_high, shift_value);
+ __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
+ } else {
+ __ Srl(dst_low, lhs_high, shift_value);
+ __ Move(dst_high, ZERO);
+ }
+ }
+ } else {
+ MipsLabel done;
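+ // Variable 64-bit shift synthesized from 32-bit shifts: only the low
+ // six bits of the amount matter. Bits shifted out of one word are
+ // funneled into the other using (x >> 1) >> (~shamt & 31), which equals
+ // x >> (32 - shamt) for shamt in [1, 31] and 0 for shamt == 0 (and
+ // likewise with left shifts). If bit 5 of the amount is set (a shift
+ // of 32-63), the words are swapped or sign-/zero-extended at the end.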
+ if (instr->IsShl()) {
+ __ Sllv(dst_low, lhs_low, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Srl(TMP, lhs_low, 1);
+ __ Srlv(TMP, TMP, AT);
+ __ Sllv(dst_high, lhs_high, rhs_reg);
+ __ Or(dst_high, dst_high, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_high, dst_low);
+ __ Move(dst_low, ZERO);
+ } else if (instr->IsShr()) {
+ __ Srav(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Sra(dst_high, dst_high, 31);
+ } else {
+ __ Srlv(dst_high, lhs_high, rhs_reg);
+ __ Nor(AT, ZERO, rhs_reg);
+ __ Sll(TMP, lhs_high, 1);
+ __ Sllv(TMP, TMP, AT);
+ __ Srlv(dst_low, lhs_low, rhs_reg);
+ __ Or(dst_low, dst_low, TMP);
+ __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
+ __ Beqz(TMP, &done);
+ __ Move(dst_low, dst_high);
+ __ Move(dst_high, ZERO);
+ }
+ __ Bind(&done);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected shift operation type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimBoolean: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register out = locations->Out().AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadWord, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register out = locations->Out().AsRegisterPairLow<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ LoadSFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ LoadSFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister out = locations->Out().AsFpuRegister<FRegister>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ LoadDFromOffset(out, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ LoadDFromOffset(out, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, obj, offset);
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+}
+
+void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool is_object = value_type == Primitive::kPrimNot;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
+ if (is_object) {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(2, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Location index = locations->InAt(1);
+ Primitive::Type value_type = instruction->GetComponentType();
+ bool needs_runtime_call = locations->WillCall();
+ bool needs_write_barrier =
+ CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
+
+ switch (value_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
+ __ StoreToOffset(kStoreByte, value, obj, offset);
+ } else {
+ __ Addu(TMP, obj, index.AsRegister<Register>());
+ __ StoreToOffset(kStoreByte, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
+ __ StoreToOffset(kStoreHalfword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot: {
+ if (!needs_runtime_call) {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegister<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreToOffset(kStoreWord, value, obj, offset);
+ } else {
+ DCHECK(index.IsRegister()) << index;
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreWord, value, TMP, data_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
+ } else {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAputObject));
+ CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
+ }
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
+ Register value = locations->InAt(2).AsRegisterPairLow<Register>();
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreToOffset(kStoreDoubleword, value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
+ __ StoreSToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(TMP, obj, TMP);
+ __ StoreSToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
+ FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
+ DCHECK(locations->InAt(2).IsFpuRegister());
+ if (index.IsConstant()) {
+ size_t offset =
+ (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
+ __ StoreDToOffset(value, obj, offset);
+ } else {
+ __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(TMP, obj, TMP);
+ __ StoreDToOffset(value, TMP, data_offset);
+ }
+ break;
+ }
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
+ }
+
+ // For ints and objects the implicit null check was already handled in the switch above.
+ if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathMIPS* slow_path =
+ new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Register index = locations->InAt(0).AsRegister<Register>();
+ Register length = locations->InAt(1).AsRegister<Register>();
+
+ // The array length is at most the maximum positive signed 32-bit integer,
+ // so a single unsigned comparison of index against length catches both
+ // index < 0 (which becomes a large unsigned value) and length <= index.
+ __ Bgeu(index, length, slow_path->GetEntryLabel());
+}
+
+void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction,
+ LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register obj_cls = locations->GetTemp(0).AsRegister<Register>();
+
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ Beqz(obj, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
+ __ Bne(obj_cls, cls, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (check->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
+ // We assume the class is not null.
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ check->GetLoadClass(),
+ check,
+ check->GetDexPc(),
+ true);
+ codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path,
+ check->GetLocations()->InAt(0).AsRegister<Register>());
+}
+
+void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
+ Primitive::Type in_type = compare->InputAt(0)->GetType();
+
+ LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
+
+ switch (in_type) {
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ // Output overlaps because it is written before doing the low comparison.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for compare operation " << in_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ // 0 if: left == right
+ // 1 if: left > right
+ // -1 if: left < right
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ MipsLabel done;
+ Register res = locations->Out().AsRegister<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+ // TODO: more efficient (direct) comparison with a constant.
+ __ Slt(TMP, lhs_high, rhs_high);
+ __ Slt(AT, rhs_high, lhs_high); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bnez(res, &done); // If we compared ==, check if lower bits are also equal.
+ __ Sltu(TMP, lhs_low, rhs_low);
+ __ Sltu(AT, rhs_low, lhs_low); // Inverted: is actually gt.
+ __ Subu(res, AT, TMP); // Result -1:1:0 for [ <, >, == ].
+ __ Bind(&done);
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ int32_t entry_point_offset;
+ bool direct;
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgFloat);
+ direct = IsDirectEntrypoint(kQuickCmpgFloat);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplFloat);
+ direct = IsDirectEntrypoint(kQuickCmplFloat);
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmpgDouble);
+ direct = IsDirectEntrypoint(kQuickCmpgDouble);
+ } else {
+ entry_point_offset = QUICK_ENTRY_POINT(pCmplDouble);
+ direct = IsDirectEntrypoint(kQuickCmplDouble);
+ }
+ }
+ codegen_->InvokeRuntime(entry_point_offset,
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ direct);
+ if (in_type == Primitive::kPrimFloat) {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgFloat, int32_t, float, float>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplFloat, int32_t, float, float>();
+ }
+ } else {
+ if (instruction->IsGtBias()) {
+ CheckEntrypointTypes<kQuickCmpgDouble, int32_t, double, double>();
+ } else {
+ CheckEntrypointTypes<kQuickCmplDouble, int32_t, double, double>();
+ }
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register dst = locations->Out().AsRegister<Register>();
+
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Location rhs_location = locations->InAt(1);
+
+ Register rhs_reg = ZERO;
+ int64_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = instruction->GetCondition();
+
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
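+ // Compute dst = lhs ^ rhs, which is zero iff lhs == rhs, then convert
+ // it to a boolean with sltiu (EQ) or sltu against zero (NE).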
+ if (use_imm && IsUint<16>(rhs_imm)) {
+ __ Xori(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Xor(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondEQ) {
+ __ Sltiu(dst, dst, 1);
+ } else {
+ __ Sltu(dst, ZERO, dst);
+ }
+ break;
+
+ case kCondLT:
+ case kCondGE:
+ if (use_imm && IsInt<16>(rhs_imm)) {
+ __ Slti(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondGE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the slt instruction but no sge.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondLE:
+ case kCondGT:
+ if (use_imm && IsInt<16>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Slti(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondGT) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the slti instruction but no sgti.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Slt(dst, rhs_reg, lhs);
+ if (if_cond == kCondLE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the slt instruction but no sle.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+
+ case kCondB:
+ case kCondAE:
+ // Use sltiu instruction if rhs_imm is in range [0, 32767] or in
+ // [max_unsigned - 32767 = 0xffff8000, max_unsigned = 0xffffffff].
+ if (use_imm &&
+ (IsUint<15>(rhs_imm) ||
+ IsUint<15>(rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm)) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff]).
+ __ Sltiu(dst, lhs, rhs_imm - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ // Use sltiu instruction if rhs_imm is in range [0, 32766] or in
+ // [max_unsigned - 32767 - 1 = 0xffff7fff, max_unsigned - 1 = 0xfffffffe].
+ // lhs <= rhs is simulated via lhs < rhs + 1.
+ if (use_imm && (rhs_imm != -1) &&
+ (IsUint<15>(rhs_imm + 1) ||
+ IsUint<15>(rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(15))))) {
+ if (IsUint<15>(rhs_imm + 1)) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ } else {
+ // 16-bit value (in range [0x8000, 0xffff]) passed to sltiu is sign-extended
+ // and then used as unsigned integer (range [0xffff8000, 0xffffffff] where rhs_imm
+ // is in range [0xffff7fff, 0xfffffffe] since lhs <= rhs is simulated via lhs < rhs + 1).
+ __ Sltiu(dst, lhs, rhs_imm + 1 - (MaxInt<uint64_t>(32) - MaxInt<uint64_t>(16)));
+ }
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
+ Primitive::Type type = div->GetResultType();
+ LocationSummary::CallKind call_kind = (type == Primitive::kPrimLong)
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ DivR6(dst, lhs, rhs);
+ } else {
+ __ DivR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLdiv));
+ CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ DivS(dst, lhs, rhs);
+ } else {
+ __ DivD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected div type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ Location value = instruction->GetLocations()->InAt(0);
+ Primitive::Type type = instruction->GetType();
+
+ switch (type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegister()) << value;
+ __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsConstant()) {
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ B(slow_path->GetEntryLabel());
+ } else {
+ // A division by a non-zero constant is valid. We don't need to perform
+ // any check, so simply fall through.
+ }
+ } else {
+ DCHECK(value.IsRegisterPair()) << value;
+ __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
+ __ Beqz(TMP, slow_path->GetEntryLabel());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
+ }
+}
+
+void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
+ DCHECK(!successor->IsExitBlock());
+ HBasicBlock* block = got->GetBlock();
+ HInstruction* previous = got->GetPrevious();
+ HLoopInformation* info = block->GetLoopInformation();
+
+ if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
+ GenerateSuspendCheck(info->GetSuspendCheck(), successor);
+ return;
+ }
+ if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
+ GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
+ }
+ if (!codegen_->GoesToNextBlock(block, successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
+ HandleGoto(got, got->GetSuccessor());
+}
+
+void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ try_boundary->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
+ HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
+ if (!successor->IsExitBlock()) {
+ HandleGoto(try_boundary, successor);
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target) {
+ HInstruction* cond = instruction->InputAt(0);
+ HCondition* condition = cond->AsCondition();
+
+ if (cond->IsIntConstant()) {
+ int32_t cond_value = cond->AsIntConstant()->GetValue();
+ if (cond_value == 1) {
+ if (always_true_target != nullptr) {
+ __ B(always_true_target);
+ }
+ return;
+ } else {
+ DCHECK_EQ(cond_value, 0);
+ }
+ } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = instruction->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Bnez(cond_val.AsRegister<Register>(), true_target);
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = condition->GetLocations()->InAt(0).AsRegister<Register>();
+ Location rhs_location = condition->GetLocations()->InAt(1);
+ Register rhs_reg = ZERO;
+ int32_t rhs_imm = 0;
+ bool use_imm = rhs_location.IsConstant();
+ if (use_imm) {
+ rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
+ } else {
+ rhs_reg = rhs_location.AsRegister<Register>();
+ }
+
+ IfCondition if_cond = condition->GetCondition();
+ if (use_imm && rhs_imm == 0) {
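+ // Comparisons against zero map onto the MIPS branch-on-zero/sign
+ // instructions; the unsigned conditions either reduce to a zero test
+ // or are statically true/false.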
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beqz(lhs, true_target);
+ break;
+ case kCondNE:
+ __ Bnez(lhs, true_target);
+ break;
+ case kCondLT:
+ __ Bltz(lhs, true_target);
+ break;
+ case kCondGE:
+ __ Bgez(lhs, true_target);
+ break;
+ case kCondLE:
+ __ Blez(lhs, true_target);
+ break;
+ case kCondGT:
+ __ Bgtz(lhs, true_target);
+ break;
+ case kCondB:
+ break; // Unsigned "below zero" is always false; emit no branch.
+ case kCondBE:
+ __ Beqz(lhs, true_target); // Unsigned "below or equal to zero" holds iff zero.
+ break;
+ case kCondA:
+ __ Bnez(lhs, true_target); // Unsigned "above zero" holds iff non-zero.
+ break;
+ case kCondAE:
+ __ B(true_target); // Unsigned "above or equal to zero" is always true.
+ break;
+ }
+ } else {
+ if (use_imm) {
+ // TODO: more efficient comparison with 16-bit constants without loading them into TMP.
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ switch (if_cond) {
+ case kCondEQ:
+ __ Beq(lhs, rhs_reg, true_target);
+ break;
+ case kCondNE:
+ __ Bne(lhs, rhs_reg, true_target);
+ break;
+ case kCondLT:
+ __ Blt(lhs, rhs_reg, true_target);
+ break;
+ case kCondGE:
+ __ Bge(lhs, rhs_reg, true_target);
+ break;
+ case kCondLE:
+ __ Bge(rhs_reg, lhs, true_target);
+ break;
+ case kCondGT:
+ __ Blt(rhs_reg, lhs, true_target);
+ break;
+ case kCondB:
+ __ Bltu(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeu(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeu(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltu(rhs_reg, lhs, true_target);
+ break;
+ }
+ }
+ }
+ if (false_target != nullptr) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
+ MipsLabel* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ MipsLabel* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+ MipsLabel* always_true_target = true_target;
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfTrueSuccessor())) {
+ always_true_target = nullptr;
+ }
+ if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
+ if_instr->IfFalseSuccessor())) {
+ false_target = nullptr;
+ }
+ GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
+}
+
+void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ LocationSummary* locations = new (GetGraph()->GetArena())
+ LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
+ HInstruction* cond = deoptimize->InputAt(0);
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena())
+ DeoptimizationSlowPathMIPS(deoptimize);
+ codegen_->AddSlowPath(slow_path);
+ MipsLabel* slow_path_entry = slow_path->GetEntryLabel();
+ GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
+}
+
+void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+ // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimLong));
+ } else {
+ locations->SetOut(Location::RequiresFpuRegister());
+ // Need some temp core registers since FP results are returned in core registers.
+ Location reg = calling_convention.GetReturnLocation(Primitive::kPrimLong);
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(instruction->GetType())) {
+ locations->SetOut(Location::RequiresFpuRegister());
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ LoadOperandType load_type = kLoadUnsignedByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ load_type = kLoadUnsignedByte;
+ break;
+ case Primitive::kPrimByte:
+ load_type = kLoadSignedByte;
+ break;
+ case Primitive::kPrimShort:
+ load_type = kLoadSignedHalfword;
+ break;
+ case Primitive::kPrimChar:
+ load_type = kLoadUnsignedHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ load_type = kLoadWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ load_type = kLoadDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile && load_type == kLoadDoubleword) {
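+ // A volatile 64-bit load cannot be made atomic with two 32-bit loads,
+ // so it goes through the pA64Load runtime entrypoint.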
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+ // Do an implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Load),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Load));
+ CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
+ if (type == Primitive::kPrimDouble) {
+ // Need to move to FP regs since FP results are returned in core registers.
+ __ Mtc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ __ Mthc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->Out().AsFpuRegister<FRegister>());
+ }
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register dst;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->Out().IsRegisterPair());
+ dst = locations->Out().AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->Out().IsRegister());
+ dst = locations->Out().AsRegister<Register>();
+ }
+ __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->Out().IsFpuRegister());
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ LoadSFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ LoadDFromOffset(dst, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
+ }
+}
+
+void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
+ Primitive::Type field_type = field_info.GetFieldType();
+ bool is_wide = (field_type == Primitive::kPrimLong) || (field_type == Primitive::kPrimDouble);
+ bool generate_volatile = field_info.IsVolatile() && is_wide;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, generate_volatile ? LocationSummary::kCall : LocationSummary::kNoCall);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (generate_volatile) {
+ InvokeRuntimeCallingConvention calling_convention;
+ // Need A0 to hold base + offset.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ if (field_type == Primitive::kPrimLong) {
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ } else {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ // Pass FP parameters in core registers.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ }
+ } else {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ }
+}
+
+void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
+ const FieldInfo& field_info,
+ uint32_t dex_pc) {
+ Primitive::Type type = field_info.GetFieldType();
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ StoreOperandType store_type = kStoreByte;
+ bool is_volatile = field_info.IsVolatile();
+
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ store_type = kStoreByte;
+ break;
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar:
+ store_type = kStoreHalfword;
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimNot:
+ store_type = kStoreWord;
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ store_type = kStoreDoubleword;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << type;
+ UNREACHABLE();
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
+ }
+
+ if (is_volatile && store_type == kStoreDoubleword) {
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Addiu32(locations->GetTemp(0).AsRegister<Register>(),
+ obj, field_info.GetFieldOffset().Uint32Value());
+    // Do implicit null check.
+ __ Lw(ZERO, locations->GetTemp(0).AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ if (type == Primitive::kPrimDouble) {
+ // Pass FP parameters in core registers.
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ __ Mfhc1(locations->GetTemp(2).AsRegister<Register>(),
+ locations->InAt(1).AsFpuRegister<FRegister>());
+ }
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pA64Store),
+ instruction,
+ dex_pc,
+ nullptr,
+ IsDirectEntrypoint(kQuickA64Store));
+    CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t*, int64_t>();
+ } else {
+ if (!Primitive::IsFloatingPointType(type)) {
+ Register src;
+ if (type == Primitive::kPrimLong) {
+ DCHECK(locations->InAt(1).IsRegisterPair());
+ src = locations->InAt(1).AsRegisterPairLow<Register>();
+ } else {
+ DCHECK(locations->InAt(1).IsRegister());
+ src = locations->InAt(1).AsRegister<Register>();
+ }
+ __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ DCHECK(locations->InAt(1).IsFpuRegister());
+ FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ StoreSToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ } else {
+ __ StoreDToOffset(src, obj, field_info.GetFieldOffset().Uint32Value());
+ }
+ }
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ }
+
+ // TODO: memory barriers?
+ if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
+ DCHECK(locations->InAt(1).IsRegister());
+ Register src = locations->InAt(1).AsRegister<Register>();
+ codegen_->MarkGCCard(obj, src);
+ }
+
+ if (is_volatile) {
+ GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
+ }
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary::CallKind call_kind =
+ instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+  // The output overlaps the inputs.
+ // Note that TypeCheckSlowPathMIPS uses this register too.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register cls = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ MipsLabel done;
+
+ // Return 0 if `obj` is null.
+ // TODO: Avoid this check if we know `obj` is not null.
+ __ Move(out, ZERO);
+ __ Beqz(obj, &done);
+
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, mirror::Object::ClassOffset().Int32Value());
+ if (instruction->IsExactCheck()) {
+ // Classes must be equal for the instanceof to succeed.
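+    // Xor leaves 0 in `out` iff the classes are equal; Sltiu (unsigned out < 1)
+    // then maps 0 to 1 and any non-zero value to 0, producing the boolean.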
+ __ Xor(out, out, cls);
+ __ Sltiu(out, out, 1);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+ __ Bne(out, cls, slow_path->GetEntryLabel());
+ __ LoadConst32(out, 1);
+ __ Bind(slow_path->GetExitLabel());
+ }
+
+ __ Bind(&done);
+}
+
+void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
+ InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
+ CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
+}
+
+void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ HandleInvoke(invoke);
+ // The register T0 is required to be used for the hidden argument in
+ // art_quick_imt_conflict_trampoline, so add the hidden argument.
+ invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
+ // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kMipsPointerSize).Uint32Value();
+ Location receiver = invoke->GetLocations()->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // Set the hidden argument.
+ __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
+ invoke->GetDexMethodIndex());
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetImtEntryAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ // TODO: intrinsic function.
+ HandleInvoke(invoke);
+}
+
+static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen ATTRIBUTE_UNUSED) {
+ if (invoke->GetLocations()->Intrinsified()) {
+ // TODO: intrinsic function.
+ return true;
+ }
+ return false;
+}
+
+void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
+ // All registers are assumed to be correctly set up per the calling convention.
+
+ Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
+ switch (invoke->GetMethodLoadKind()) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
+ // temp = thread->string_init_entrypoint
+ __ LoadFromOffset(kLoadWord,
+ temp.AsRegister<Register>(),
+ TR,
+ invoke->GetStringInitOffset());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
+ __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ // TODO: Implement this type. (Needs literal support.) At the moment, the
+ // CompilerDriver will not direct the backend to use this type for MIPS.
+ LOG(FATAL) << "Unsupported!";
+ UNREACHABLE();
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
+ Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
+ Register reg = temp.AsRegister<Register>();
+ Register method_reg;
+ if (current_method.IsRegister()) {
+ method_reg = current_method.AsRegister<Register>();
+ } else {
+ // TODO: use the appropriate DCHECK() here if possible.
+ // DCHECK(invoke->GetLocations()->Intrinsified());
+ DCHECK(!current_method.IsValid());
+ method_reg = reg;
+ __ Lw(reg, SP, kCurrentMethodStackOffset);
+ }
+
+ // temp = temp->dex_cache_resolved_methods_;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ method_reg,
+ ArtMethod::DexCacheResolvedMethodsOffset(kMipsPointerSize).Int32Value());
+ // temp = temp[index_in_cache]
+ uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
+ __ LoadFromOffset(kLoadWord,
+ reg,
+ reg,
+ CodeGenerator::GetCachePointerOffset(index_in_cache));
+ break;
+ }
+ }
+
+ switch (invoke->GetCodePtrLocation()) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
+ __ Jalr(&frame_entry_label_, T9);
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+      // T9 = invoke->GetDirectCodePtr();
+      __ LoadConst32(T9, invoke->GetDirectCodePtr());
+      // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+      // TODO: Implement kCallPCRelative. For the moment, we fall back to kCallArtMethod.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+      // TODO: Implement kCallDirectWithFixup. For the moment, we fall back to kCallArtMethod.
+ FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
+ // T9 = callee_method->entry_point_from_quick_compiled_code_;
+      __ LoadFromOffset(kLoadWord,
+ T9,
+ callee_method.AsRegister<Register>(),
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kMipsWordSize).Int32Value());
+ // T9()
+ __ Jalr(T9);
+ __ Nop();
+ break;
+ }
+ DCHECK(!IsLeafMethod());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+ codegen_->GenerateStaticOrDirectCall(invoke,
+ locations->HasTemps()
+ ? locations->GetTemp(0)
+ : Location::NoLocation());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ // TODO: Try to generate intrinsics code.
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ }
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // temp = temp->GetMethodAt(method_offset);
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
+ // T9 = temp->GetEntryPoint();
+ __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
+ // T9();
+ __ Jalr(T9);
+ __ Nop();
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DeclaringClassOffset().Int32Value());
+ } else {
+ DCHECK(cls->CanCallRuntime());
+ __ LoadFromOffset(kLoadWord, out, current_method,
+ ArtMethod::DexCacheResolvedTypesOffset(kMipsPointerSize).Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS(
+ cls,
+ cls,
+ cls->GetDexPc(),
+ cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ Beqz(out, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ }
+}
+
+static int32_t GetExceptionTlsOffset() {
+ return Thread::ExceptionOffset<kMipsWordSize>().Int32Value();
+}
+
+void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
+ Register out = load->GetLocations()->Out().AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
+ new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
+}
+
+void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
+ __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
+}
+
+void LocationsBuilderMIPS::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS(load);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = load->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ __ LoadFromOffset(kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ Beqz(out, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderMIPS::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
+ if (instruction->IsEnter()) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLockObject));
+ CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
+ } else {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickUnlockObject));
+    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
+  }
+}
+
+void LocationsBuilderMIPS::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+
+ if (isR6) {
+ __ MulR6(dst, lhs, rhs);
+ } else {
+ __ MulR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
+ Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
+
+      // Extra checks needed because register pairs such as A1_A2 exist: the
+      // algorithm below is wrong if dst_high aliases lhs_low or rhs_low
+      // (e.g. lhs = A0_A1, rhs = A2_A3 and dst = A1_A2).
+ DCHECK_NE(dst_high, lhs_low);
+ DCHECK_NE(dst_high, rhs_low);
+
+ // A_B * C_D
+ // dst_hi: [ low(A*D) + low(B*C) + hi(B*D) ]
+ // dst_lo: [ low(B*D) ]
+      // Note: R2 and R6 MUL produce the low 32 bits of the multiplication result.
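+      // E.g. (illustrative): for lhs = 0x00000001_FFFFFFFF and rhs = 0x2,
+      // dst_lo = low(0xFFFFFFFF * 2) = 0xFFFFFFFE, and
+      // dst_hi = low(1 * 2) + low(0xFFFFFFFF * 0) + hi(0xFFFFFFFF * 2)
+      //        = 2 + 0 + 1 = 3, matching the full product 0x00000003_FFFFFFFE.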
+
+ if (isR6) {
+ __ MulR6(TMP, lhs_high, rhs_low);
+ __ MulR6(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MuhuR6(TMP, lhs_low, rhs_low);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MulR6(dst_low, lhs_low, rhs_low);
+ } else {
+ __ MulR2(TMP, lhs_high, rhs_low);
+ __ MulR2(dst_high, lhs_low, rhs_high);
+ __ Addu(dst_high, dst_high, TMP);
+ __ MultuR2(lhs_low, rhs_low);
+ __ Mfhi(TMP);
+ __ Addu(dst_high, dst_high, TMP);
+ __ Mflo(dst_low);
+ }
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
+ FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ MulS(dst, lhs, rhs);
+ } else {
+ __ MulD(dst, lhs, rhs);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected mul type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
+ switch (neg->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Subu(dst, ZERO, src);
+ break;
+ }
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
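+      // Negate the low word, then subtract the borrow from the negated high
+      // word: Sltu sets TMP to 1 exactly when dst_low is non-zero, i.e. when
+      // 0 - src_low wrapped around. E.g. (illustrative): negating
+      // 0x00000000_00000001 gives dst_low = 0xFFFFFFFF, TMP = 1 and
+      // dst_high = 0 - 0 - 1 = 0xFFFFFFFF, i.e. -1 as a 64-bit value.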
+ __ Subu(dst_low, ZERO, src_low);
+ __ Sltu(TMP, ZERO, dst_low);
+ __ Subu(dst_high, ZERO, src_high);
+ __ Subu(dst_high, dst_high, TMP);
+ break;
+ }
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (type == Primitive::kPrimFloat) {
+ __ NegS(dst, src);
+ } else {
+ __ NegD(dst, src);
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected neg type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(2);
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocArrayWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
+ void*, uint32_t, int32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
+ InvokeRuntimeCallingConvention calling_convention;
+ Register current_method_register = calling_convention.GetRegisterAt(1);
+ __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
+  // Move a uint16_t value to a register.
+ __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
+ codegen_->InvokeRuntime(
+ GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickAllocObjectWithAccessCheck));
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
+}
+
+void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+ __ Nor(dst, src, ZERO);
+ break;
+ }
+
+ case Primitive::kPrimLong: {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
+ Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
+ __ Nor(dst_high, src_high, ZERO);
+ __ Nor(dst_low, src_low, ZERO);
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
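+  // A boolean is materialized as 0 or 1, so XORing the low bit with 1 flips it.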
+ __ Xori(locations->Out().AsRegister<Register>(),
+ locations->InAt(0).AsRegister<Register>(),
+ 1);
+}
+
+void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
+ if (codegen_->CanMoveNullCheckToUser(instruction)) {
+ return;
+ }
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Lw(ZERO, obj.AsRegister<Register>(), 0);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void InstructionCodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeMIPS* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ Location obj = instruction->GetLocations()->InAt(0);
+
+ __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
+}
+
+void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
+ if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
+ GenerateImplicitNullCheck(instruction);
+ } else {
+ GenerateExplicitNullCheck(instruction);
+ }
+}
+
+void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
+void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
+ ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
+}
+
+void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitRem(HRem* rem) {
+ Primitive::Type type = rem->GetResultType();
+ LocationSummary::CallKind call_kind =
+ (type == Primitive::kPrimInt) ? LocationSummary::kNoCall : LocationSummary::kCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimLong: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(type));
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
+ Primitive::Type type = instruction->GetType();
+ LocationSummary* locations = instruction->GetLocations();
+ bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
+
+ switch (type) {
+ case Primitive::kPrimInt: {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register lhs = locations->InAt(0).AsRegister<Register>();
+ Register rhs = locations->InAt(1).AsRegister<Register>();
+ if (isR6) {
+ __ ModR6(dst, lhs, rhs);
+ } else {
+ __ ModR2(dst, lhs, rhs);
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLmod),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickLmod));
+ CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
+ break;
+ }
+ case Primitive::kPrimFloat: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmodf),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmodf));
+      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
+ break;
+ }
+ case Primitive::kPrimDouble: {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pFmod),
+ instruction, instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickFmod));
+      CheckEntrypointTypes<kQuickFmod, double, double, double>();
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected rem type " << type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ memory_barrier->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
+ GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
+}
+
+void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
+ Primitive::Type return_type = ret->InputAt(0)->GetType();
+ locations->SetInAt(0, MipsReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
+ ret->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
+ codegen_->GenerateFrameExit();
+}
+
+void LocationsBuilderMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
+ HandleShift(shl);
+}
+
+void LocationsBuilderMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
+ HandleShift(shr);
+}
+
+void LocationsBuilderMIPS::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
+ HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo());
+}
+
+void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
+ HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(instruction,
+ instruction->GetFieldType(),
+ calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
+ HBasicBlock* block = instruction->GetBlock();
+ if (block->GetLoopInformation() != nullptr) {
+ DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
+ // The back edge will generate the suspend check.
+ return;
+ }
+ if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
+ // The goto will generate the suspend check.
+ return;
+ }
+ GenerateSuspendCheck(instruction, nullptr);
+}
+
+void LocationsBuilderMIPS::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
+ instruction,
+ instruction->GetDexPc(),
+ nullptr,
+ IsDirectEntrypoint(kQuickDeliverException));
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+}
+
+void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ Primitive::Type input_type = conversion->GetInputType();
+ Primitive::Type result_type = conversion->GetResultType();
+ DCHECK_NE(input_type, result_type);
+
+ if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
+ (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
+ LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
+ }
+
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
+ (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
+ call_kind = LocationSummary::kCall;
+ }
+
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
+
+ if (call_kind == LocationSummary::kNoCall) {
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+
+ if (Primitive::IsFloatingPointType(result_type)) {
+ locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ } else {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ }
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+
+ if (Primitive::IsFloatingPointType(input_type)) {
+ locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
+ } else {
+ DCHECK_EQ(input_type, Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ }
+
+ locations->SetOut(calling_convention.GetReturnLocation(result_type));
+ }
+}
+
+void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
+ LocationSummary* locations = conversion->GetLocations();
+ Primitive::Type result_type = conversion->GetResultType();
+ Primitive::Type input_type = conversion->GetInputType();
+ bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
+
+ DCHECK_NE(input_type, result_type);
+
+ if (result_type == Primitive::kPrimLong && Primitive::IsIntegralType(input_type)) {
+ Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
+ Register dst_low = locations->Out().AsRegisterPairLow<Register>();
+ Register src = locations->InAt(0).AsRegister<Register>();
+
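+    // The low word is a copy of the source; the high word replicates the sign
+    // bit via the arithmetic shift, e.g. (illustrative) src = 0xFFFFFFFE (-2)
+    // becomes 0xFFFFFFFF_FFFFFFFE as a long.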
+ __ Move(dst_low, src);
+ __ Sra(dst_high, src, 31);
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
+ Register dst = locations->Out().AsRegister<Register>();
+ Register src = (input_type == Primitive::kPrimLong)
+ ? locations->InAt(0).AsRegisterPairLow<Register>()
+ : locations->InAt(0).AsRegister<Register>();
+
+ switch (result_type) {
+ case Primitive::kPrimChar:
+ __ Andi(dst, src, 0xFFFF);
+ break;
+ case Primitive::kPrimByte:
+ if (has_sign_extension) {
+ __ Seb(dst, src);
+ } else {
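+          // Without Seb (pre-R2 cores), shift left then arithmetic-right by 24:
+          // this replicates bit 7 over the upper bits, e.g. 0x000000FF -> 0xFFFFFFFF.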
+ __ Sll(dst, src, 24);
+ __ Sra(dst, dst, 24);
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (has_sign_extension) {
+ __ Seh(dst, src);
+ } else {
+ __ Sll(dst, src, 16);
+ __ Sra(dst, dst, 16);
+ }
+ break;
+ case Primitive::kPrimInt:
+ __ Move(dst, src);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
+ if (input_type != Primitive::kPrimLong) {
+ Register src = locations->InAt(0).AsRegister<Register>();
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
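+      // Move the integer into an FPU register first (Mtc1); cvt.s.w/cvt.d.w
+      // below convert the FPU register contents, interpreted as a signed word.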
+ __ Mtc1(src, FTMP);
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsw(dst, FTMP);
+ } else {
+ __ Cvtdw(dst, FTMP);
+ }
+ } else {
+ int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
+ : QUICK_ENTRY_POINT(pL2d);
+ bool direct = (result_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickL2f)
+ : IsDirectEntrypoint(kQuickL2d);
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickL2f, float, int64_t>();
+ } else {
+ CheckEntrypointTypes<kQuickL2d, double, int64_t>();
+ }
+ }
+ } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
+ CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
+ int32_t entry_offset;
+ bool direct;
+ if (result_type != Primitive::kPrimLong) {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
+ : QUICK_ENTRY_POINT(pD2iz);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2iz)
+                                                     : IsDirectEntrypoint(kQuickD2iz);
+ } else {
+ entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
+ : QUICK_ENTRY_POINT(pD2l);
+      direct = (input_type == Primitive::kPrimFloat) ? IsDirectEntrypoint(kQuickF2l)
+                                                     : IsDirectEntrypoint(kQuickD2l);
+ }
+ codegen_->InvokeRuntime(entry_offset,
+ conversion,
+ conversion->GetDexPc(),
+ nullptr,
+ direct);
+ if (result_type != Primitive::kPrimLong) {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2iz, int32_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2iz, int32_t, double>();
+ }
+ } else {
+ if (input_type == Primitive::kPrimFloat) {
+ CheckEntrypointTypes<kQuickF2l, int64_t, float>();
+ } else {
+ CheckEntrypointTypes<kQuickD2l, int64_t, double>();
+ }
+ }
+ } else if (Primitive::IsFloatingPointType(result_type) &&
+ Primitive::IsFloatingPointType(input_type)) {
+ FRegister dst = locations->Out().AsFpuRegister<FRegister>();
+ FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
+ if (result_type == Primitive::kPrimFloat) {
+ __ Cvtsd(dst, src);
+ } else {
+ __ Cvtds(dst, src);
+ }
+ } else {
+ LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
+ << " to " << result_type;
+ }
+}
+
+void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
+ HandleShift(ushr);
+}
+
+void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
+ HandleBinaryOp(instruction);
+}
+
+void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, this should be removed during prepare for register allocator.
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS::VisitFakeString(HFakeString* instruction) {
+ DCHECK(codegen_->IsBaseline());
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
+}
+
+void InstructionCodeGeneratorMIPS::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
+ DCHECK(codegen_->IsBaseline());
+ // Will be generated at use site.
+}
+
+void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Create a set of compare/jumps.
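+  // E.g. (illustrative): with lower_bound == 10 and num_entries == 3, this emits
+  // a compare-and-branch for each of the case values 10, 11 and 12, then falls
+  // through to the default branch below.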
+ const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ for (int32_t i = 0; i < num_entries; ++i) {
+ int32_t case_value = lower_bound + i;
+ MipsLabel* successor_label = codegen_->GetLabelOf(successors[i]);
+ if (case_value == 0) {
+ __ Beqz(value_reg, successor_label);
+ } else {
+ __ LoadConst32(TMP, case_value);
+ __ Beq(value_reg, TMP, successor_label);
+ }
+ }
+
+ // Insert the default branch for every other value.
+ if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ __ B(codegen_->GetLabelOf(default_block));
+ }
+}
+
+void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+  // The trampoline uses the same calling convention as dex calling conventions,
+  // except instead of loading arg0/A0 with the target Method*, arg0/A0 will
+  // contain the method_idx.
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
+ codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
+}
+
+#undef __
+#undef QUICK_ENTRY_POINT
+
+} // namespace mips
+} // namespace art
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
new file mode 100644
index 0000000000..a571e76933
--- /dev/null
+++ b/compiler/optimizing/code_generator_mips.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
+
+#include "code_generator.h"
+#include "dex/compiler_enums.h"
+#include "driver/compiler_options.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/mips/assembler_mips.h"
+
+namespace art {
+namespace mips {
+
+// InvokeDexCallingConvention registers
+
+static constexpr Register kParameterCoreRegisters[] =
+ { A1, A2, A3 };
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+static constexpr FRegister kParameterFpuRegisters[] =
+ { F12, F14 };
+static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
+
+
+// InvokeRuntimeCallingConvention registers
+
+static constexpr Register kRuntimeParameterCoreRegisters[] =
+ { A0, A1, A2, A3 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+
+static constexpr FRegister kRuntimeParameterFpuRegisters[] =
+    { F12, F14 };
+static constexpr size_t kRuntimeParameterFpuRegistersLength =
+ arraysize(kRuntimeParameterFpuRegisters);
+
+
+static constexpr Register kCoreCalleeSaves[] =
+ { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
+static constexpr FRegister kFpuCalleeSaves[] =
+ { F20, F22, F24, F26, F28, F30 };
+
+
+class CodeGeneratorMIPS;
+
+class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFpuRegisters,
+ kParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitorMIPS() {}
+ virtual ~InvokeDexCallingConventionVisitorMIPS() {}
+
+ Location GetNextLocation(Primitive::Type type) OVERRIDE;
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE;
+ Location GetMethodLocation() const OVERRIDE;
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
+};
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
+ public:
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength,
+ kMipsPointerSize) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(V0, V1)
+ : Location::RegisterLocation(V0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(A2, A3)
+ : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
+};
+
+class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
+ public:
+ ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
+ : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+
+ void Exchange(int index1, int index2, bool double_slot);
+
+ MipsAssembler* GetAssembler() const;
+
+ private:
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
+};
+
+class SlowPathCodeMIPS : public SlowPathCode {
+ public:
+ SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+
+ MipsLabel* GetEntryLabel() { return &entry_label_; }
+ MipsLabel* GetExitLabel() { return &exit_label_; }
+
+ private:
+ MipsLabel entry_label_;
+ MipsLabel exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
+};
+
+class LocationsBuilderMIPS : public HGraphVisitor {
+ public:
+ LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ private:
+ void HandleInvoke(HInvoke* invoke);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+
+ InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
+
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
+};
+
+class InstructionCodeGeneratorMIPS : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ void Visit##name(H##name* instr) OVERRIDE;
+
+ FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
+ << " (id " << instruction->GetId() << ")";
+ }
+
+ MipsAssembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
+ void GenerateMemoryBarrier(MemBarrierKind kind);
+ void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void HandleBinaryOp(HBinaryOperation* operation);
+ void HandleShift(HBinaryOperation* operation);
+ void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
+ void GenerateImplicitNullCheck(HNullCheck* instruction);
+ void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateTestAndBranch(HInstruction* instruction,
+ MipsLabel* true_target,
+ MipsLabel* false_target,
+ MipsLabel* always_true_target);
+ void HandleGoto(HInstruction* got, HBasicBlock* successor);
+
+ MipsAssembler* const assembler_;
+ CodeGeneratorMIPS* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
+};
+
+class CodeGeneratorMIPS : public CodeGenerator {
+ public:
+ CodeGeneratorMIPS(HGraph* graph,
+ const MipsInstructionSetFeatures& isa_features,
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
+ virtual ~CodeGeneratorMIPS() {}
+
+ void GenerateFrameEntry() OVERRIDE;
+ void GenerateFrameExit() OVERRIDE;
+
+ void Bind(HBasicBlock* block) OVERRIDE;
+
+ void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+ void Move32(Location destination, Location source);
+ void Move64(Location destination, Location source);
+ void MoveConstant(Location location, HConstant* c);
+
+ size_t GetWordSize() const OVERRIDE { return kMipsWordSize; }
+
+ size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
+
+ uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+ return assembler_.GetLabelLocation(GetLabelOf(block));
+ }
+
+ HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ MipsAssembler* GetAssembler() OVERRIDE { return &assembler_; }
+ const MipsAssembler& GetAssembler() const OVERRIDE { return assembler_; }
+
+ void MarkGCCard(Register object, Register value);
+
+ // Register allocation.
+
+ void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+
+ void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ // Blocks all register pairs made out of blocked core registers.
+ void UpdateBlockedPairRegisters() const;
+
+ InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
+
+ const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
+ return isa_features_;
+ }
+
+ MipsLabel* GetLabelOf(HBasicBlock* block) const {
+ return CommonGetLabelOf<MipsLabel>(block_labels_, block);
+ }
+
+ void Initialize() OVERRIDE {
+ block_labels_ = CommonInitializeLabels<MipsLabel>();
+ }
+
+ void Finalize(CodeAllocator* allocator) OVERRIDE;
+
+ // Code generation helpers.
+
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+
+ void MoveConstant(Location destination, int32_t value);
+
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+ // Generate code to invoke a runtime entry point.
+ void InvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path) OVERRIDE;
+
+ void InvokeRuntime(int32_t offset,
+ HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path,
+ bool is_direct_entrypoint);
+
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
+
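+ // On MIPS32 a Java long lives in a pair of 32-bit core registers.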
+ bool NeedsTwoRegisters(Primitive::Type type) const {
+ return type == Primitive::kPrimLong;
+ }
+
+ void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
+ void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
+ Location temp ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
+ Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ MipsLabel* block_labels_;
+ MipsLabel frame_entry_label_;
+ LocationsBuilderMIPS location_builder_;
+ InstructionCodeGeneratorMIPS instruction_visitor_;
+ ParallelMoveResolverMIPS move_resolver_;
+ MipsAssembler assembler_;
+ const MipsInstructionSetFeatures& isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
+};
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 1a08503cf9..5f78285b69 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -342,8 +342,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
- : locations->Out();
+ Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
uint32_t dex_pc = instruction_->GetDexPc();
DCHECK(instruction_->IsCheckCast()
|| !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
@@ -1778,6 +1777,9 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
return;
}
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
LocationSummary* locations = instruction->GetLocations();
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
@@ -1855,6 +1857,48 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
}
}
break;
+
+ case kCondB:
+ case kCondAE:
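+ // Sltiu sign-extends its 16-bit immediate before the unsigned compare,
+ // so only [0, 0x7fff] can be used directly.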
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
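+ // The bound is 0x7ffe so that rhs_imm + 1 below still fits the usable
+ // Sltiu immediate range.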
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
}
}
@@ -2072,6 +2116,17 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bgtzc(lhs, true_target);
break;
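+ // For the unsigned compares against zero below: no value is below zero
+ // and every value is above-or-equal, while BE and A reduce to tests
+ // for (in)equality with zero.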
+ case kCondB:
+ break; // always false
+ case kCondBE:
+ __ Beqzc(lhs, true_target); // <= 0 if zero
+ break;
+ case kCondA:
+ __ Bnezc(lhs, true_target); // > 0 if non-zero
+ break;
+ case kCondAE:
+ __ B(true_target); // always true
+ break;
}
} else {
if (use_imm) {
@@ -2086,12 +2141,16 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondEQ:
case kCondGE:
case kCondLE:
+ case kCondBE:
+ case kCondAE:
// if lhs == rhs for a positive condition, then it is a branch
__ B(true_target);
break;
case kCondNE:
case kCondLT:
case kCondGT:
+ case kCondB:
+ case kCondA:
// if lhs == rhs for a negative condition, then it is a NOP
break;
}
@@ -2115,6 +2174,18 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bltc(rhs_reg, lhs, true_target);
break;
+ case kCondB:
+ __ Bltuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeuc(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltuc(rhs_reg, lhs, true_target);
+ break;
}
}
}
@@ -2151,8 +2222,7 @@ void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
@@ -3462,6 +3532,38 @@ void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
DCHECK(codegen_->IsBaseline());
LocationSummary* locations =
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7799437235..df3fc0d1e9 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -217,9 +217,6 @@ class InstructionCodeGeneratorMIPS64 : public HGraphVisitor {
Mips64Assembler* GetAssembler() const { return assembler_; }
private:
- // Generate code for the given suspend check. If not null, `successor`
- // is the block to branch to if the suspend check is not needed, and after
- // the suspend call.
void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
void GenerateMemoryBarrier(MemBarrierKind kind);
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f8be21a06e..963eec2529 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -428,7 +428,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
-inline Condition X86SignedCondition(IfCondition cond) {
+inline Condition X86Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -436,19 +436,30 @@ inline Condition X86SignedCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition and FP condition to x86 name.
inline Condition X86UnsignedOrFPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
+ // Signed to unsigned, and FP to x86 name.
case kCondLT: return kBelow;
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+ // Unsigned conditions remain unchanged.
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -521,7 +532,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1040,8 +1052,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
@@ -1067,7 +1078,7 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = X86UnsignedOrFPCondition(if_cond);
+ Condition final_condition = X86UnsignedOrFPCondition(if_cond); // The low words always compare unsigned.
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
@@ -1088,6 +1099,18 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
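+ // For the unsigned cases the high words also compare unsigned; only
+ // when they are equal do the low-word compares below (which are always
+ // unsigned) decide the result.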
}
if (right.IsConstant()) {
@@ -1101,12 +1124,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, Immediate(val_high));
}
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
if (val_low == 0) {
@@ -1120,12 +1143,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, right_high);
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
__ cmpl(left_low, right_low);
@@ -1214,7 +1237,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
// Condition has not been materialized, use its inputs as the
@@ -1247,7 +1270,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1283,8 +1306,7 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::Any());
}
}
@@ -1309,9 +1331,8 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -1338,8 +1359,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86::VisitCondition(HCondition* cond) {
@@ -1405,7 +1425,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86SignedCondition(cond->GetCondition()), reg);
+ __ setb(X86Condition(cond->GetCondition()), reg);
return;
}
case Primitive::kPrimLong:
@@ -1483,15 +1503,46 @@ void InstructionCodeGeneratorX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
@@ -1500,9 +1551,8 @@ void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -1511,9 +1561,8 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -1522,9 +1571,8 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1533,9 +1581,8 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1550,8 +1597,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3685,8 +3731,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4684,13 +4729,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5613,15 +5656,13 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr
}
}
-void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5669,6 +5710,51 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
+void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Constant area pointer.
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // And the temporary we need.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // The optimizing compiler emits a jump table for this switch.
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+
+ // Remove the bias, if needed.
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg, -lower_bound));
+ value_reg = temp_reg;
+ }
+
+ // Is the value in range?
+ DCHECK_GE(num_entries, 1);
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
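+ // The single unsigned compare covers both bounds: biased values below
+ // the range wrap around to large unsigned numbers and take kAbove too.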
+
+ // We are in the range of the table.
+ // Load (target-constant_area) from the jump table, indexing by the value.
+ __ movl(temp_reg, codegen_->LiteralCaseTable(switch_instr, constant_area, value_reg));
+
+ // Compute the actual target address by adding in constant_area.
+ __ addl(temp_reg, constant_area);
+
+ // And jump.
+ __ jmp(temp_reg);
+}
+
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
@@ -5752,28 +5838,18 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
}
}
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
- // Generate the constant area if needed.
- X86Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values.
- assembler->Align(4, 0);
- constant_area_start_ = assembler->CodeSize();
- assembler->AddConstantArea();
- }
-
- // And finish up.
- CodeGenerator::Finalize(allocator);
-}
-
/**
* Class to handle late fixup of offsets into constant area.
*/
class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
- RIPFixup(const CodeGeneratorX86& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
+ RIPFixup(CodeGeneratorX86& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86* codegen_;
private:
void Process(const MemoryRegion& region, int pos) OVERRIDE {
@@ -5781,19 +5857,77 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
- int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();;
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
}
- const CodeGeneratorX86& codegen_;
-
// Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
+ int32_t offset_into_constant_area_;
};
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86& codegen, HX86PackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // The label values in the jump table are computed relative to the
+ // instruction addressing the constant area.
+ const int32_t relative_offset = codegen_->GetMethodAddressOffset();
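+ // The entries are relative to the method-address base rather than to
+ // the table itself; VisitX86PackedSwitch adds that base back at runtime.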
+
+ // Populate the jump table with one offset per case target.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - relative_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HX86PackedSwitch* switch_instr_;
+};
+
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
@@ -5814,98 +5948,18 @@ Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
return Address(reg, kDummy32BitOffset, fixup);
}
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class ConstantHandlerVisitor : public HGraphVisitor {
- public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
-
- private:
- void VisitAdd(HAdd* add) OVERRIDE {
- BinaryFP(add);
- }
-
- void VisitSub(HSub* sub) OVERRIDE {
- BinaryFP(sub);
- }
-
- void VisitMul(HMul* mul) OVERRIDE {
- BinaryFP(mul);
- }
-
- void VisitDiv(HDiv* div) OVERRIDE {
- BinaryFP(div);
- }
+Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
+ Register reg,
+ Register value) {
+ // Create a fixup that both emits the jump table and addresses it.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
- void VisitReturn(HReturn* ret) OVERRIDE {
- HConstant* value = ret->InputAt(0)->AsConstant();
- if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
- ReplaceInput(ret, value, 0, true);
- }
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void BinaryFP(HBinaryOperation* bin) {
- HConstant* rhs = bin->InputAt(1)->AsConstant();
- if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
- ReplaceInput(bin, rhs, 1, false);
- }
- }
-
- void InitializeConstantAreaPointer(HInstruction* user) {
- // Ensure we only initialize the pointer once.
- if (base_ != nullptr) {
- return;
- }
-
- HGraph* graph = GetGraph();
- HBasicBlock* entry = graph->GetEntryBlock();
- base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
- HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
- entry->InsertInstructionBefore(base_, insert_pos);
- DCHECK(base_ != nullptr);
- }
-
- void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
- HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
- insn->ReplaceInput(load_constant, input_index);
- }
-
- void HandleInvoke(HInvoke* invoke) {
- // Ensure that we can load FP arguments from the constant area.
- for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
- HConstant* input = invoke->InputAt(i)->AsConstant();
- if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
- ReplaceInput(invoke, input, i, true);
- }
- }
- }
-
- // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
- // input to the HX86LoadFromConstantTable instructions.
- HX86ComputeBaseMethodAddress* base_;
-};
+ // Remember the fixup; Finalize() will populate the table.
+ fixups_to_jump_tables_.push_back(table_fixup);
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
- visitor.VisitInsertionOrder();
+ // Use a scaled address, since each jump-table entry is a 4-byte offset.
+ return Address(reg, value, TIMES_4, kDummy32BitOffset, table_fixup);
}
// TODO: target as memory.
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ae2d84f945..fdfc5ab69b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};
+class JumpTableRIPFixup;
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
@@ -385,6 +387,8 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralInt32Address(int32_t v, Register reg);
Address LiteralInt64Address(int64_t v, Register reg);
+ Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
private:
@@ -405,6 +409,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Used for fixups to the constant area.
int32_t constant_area_start_;
+ // Fixups for jump tables that need to be patched after the constant table is generated.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
// If there is a HX86ComputeBaseMethodAddress instruction in the graph
// (which shall be the sole instruction of this kind), subtracting this offset
// from the value contained in the out register of this HX86ComputeBaseMethodAddress
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 21120a0c80..ed2e4ca87c 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -449,11 +449,16 @@ inline Condition X86_64IntegerCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps FP condition to x86_64 name.
inline Condition X86_64FPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
@@ -462,6 +467,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+ default: break; // should not happen
};
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -670,7 +676,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1043,8 +1050,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
@@ -1246,8 +1252,7 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
HInstruction* cond = deoptimize->InputAt(0);
- DCHECK(cond->IsCondition());
- if (cond->AsCondition()->NeedsMaterialization()) {
+ if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
locations->SetInAt(0, Location::Any());
}
}
@@ -1272,9 +1277,8 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -1301,8 +1305,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
@@ -1474,6 +1477,38 @@ void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
@@ -1575,9 +1610,8 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
@@ -1586,9 +1620,8 @@ void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -1597,9 +1630,8 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -1608,9 +1640,8 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1619,9 +1650,9 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1636,8 +1667,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3591,8 +3621,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -4412,13 +4441,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -5293,15 +5320,13 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
}
}
-void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5322,31 +5347,43 @@ void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
int32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
- CpuRegister value_reg = locations->InAt(0).AsRegister<CpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors[i]));
+ // Remove the bias, if needed.
+ Register value_reg_out = value_reg_in.AsRegister();
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg_in, -lower_bound));
+ value_reg_out = temp_reg.AsRegister();
}
+ CpuRegister value_reg(value_reg_out);
- // And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
- }
+ // Is the value in range?
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
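+ // As on x86, the unsigned compare also sends values below the bias
+ // (wrapped to large unsigned numbers) to the default block.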
+
+ // We are in the range of the table.
+ // Load the address of the jump table in the constant area.
+ __ leaq(base_reg, codegen_->LiteralCaseTable(switch_instr));
+
+ // Load the (signed) offset from the jump table.
+ __ movsxd(temp_reg, Address(base_reg, value_reg, TIMES_4, 0));
+
+ // Add the offset to the address of the table base.
+ __ addq(temp_reg, base_reg);
+
+ // And jump.
+ __ jmp(temp_reg);
}
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
@@ -5372,15 +5409,85 @@ void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
}
}
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ RIPFixup(CodeGeneratorX86_64& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86_64* codegen_;
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. We use the address of the
+ // 'next' instruction, which is 'pos' (patch the 4 bytes before).
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - pos;
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ // Location in constant area that the fixup refers to.
+ size_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86_64& codegen, HPackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // Compute the offset from the start of the function to this jump table.
+ const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;
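+ // Unlike x86, the entries are relative to the table itself, which
+ // VisitPackedSwitch re-derives with a RIP-relative leaq.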
+
+ // Populate the jump table with one offset per case target.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - current_table_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HPackedSwitch* switch_instr_;
+};
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values. If used for vectors at a later time, this will need to be
- // updated to 16 bytes with the appropriate offset.
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8 byte values.
assembler->Align(4, 0);
constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
assembler->AddConstantArea();
}
@@ -5388,31 +5495,6 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-/**
- * Class to handle late fixup of offsets into constant area.
- */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
- public:
- RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
-
- private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
- // Patch the correct offset for the instruction. We use the address of the
- // 'next' instruction, which is 'pos' (patch the 4 bytes before).
- int constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int relative_position = constant_offset - pos;
-
- // Patch in the right value.
- region.StoreUnaligned<int32_t>(pos - 4, relative_position);
- }
-
- const CodeGeneratorX86_64& codegen_;
-
- // Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
-};
-
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
@@ -5453,6 +5535,16 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type t
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
+ // Create a fixup that both emits the jump table and addresses it.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+
+ // Remember the fixup; Finalize() will populate the table.
+ fixups_to_jump_tables_.push_back(table_fixup);
+ return Address::RIP(table_fixup);
+}
+
#undef __
} // namespace x86_64
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d6a6a7e760..dc86a48ce7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -234,6 +234,9 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};
+// Class for fixups to jump tables.
+class JumpTableRIPFixup;
+
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
@@ -354,6 +357,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Load a 64 bit value into a register in the most efficient manner.
void Load64BitValue(CpuRegister dest, int64_t value);
+ Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
@@ -391,6 +395,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// We will fix this up in the linker later to have the right value.
static constexpr int32_t kDummy32BitOffset = 256;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 22f227c56a..57de41f557 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -20,6 +20,8 @@
#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm/registers_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "arch/mips/registers_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/mips64/registers_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
@@ -29,6 +31,7 @@
#include "builder.h"
#include "code_generator_arm.h"
#include "code_generator_arm64.h"
+#include "code_generator_mips.h"
#include "code_generator_mips64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
@@ -43,6 +46,7 @@
#include "ssa_liveness_analysis.h"
#include "utils.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
@@ -177,6 +181,14 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
Run(allocator, codegenARM64, has_result, expected);
}
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ codegenMIPS.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kMips) {
+ Run(allocator, codegenMIPS, has_result, expected);
+ }
+
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
@@ -234,6 +246,11 @@ static void RunCodeOptimized(HGraph* graph,
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kMips) {
+ std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
+ MipsInstructionSetFeatures::FromCppDefines());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
+ RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kMips64) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
@@ -761,4 +778,130 @@ TEST(CodegenTest, ReturnDivInt2Addr) {
TestCode(data, true, 2);
}
+// Helper method that builds a graph comparing constants i and j with the
+// given condition and checks the runtime result against C++ semantics.
+static void TestComparison(IfCondition condition, int64_t i, int64_t j, Primitive::Type type) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ graph->SetExitBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(block);
+ block->AddSuccessor(exit_block);
+
+ HInstruction* op1;
+ HInstruction* op2;
+ if (type == Primitive::kPrimInt) {
+ op1 = graph->GetIntConstant(i);
+ op2 = graph->GetIntConstant(j);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimLong);
+ op1 = graph->GetLongConstant(i);
+ op2 = graph->GetLongConstant(j);
+ }
+
+ HInstruction* comparison = nullptr;
+ bool expected_result = false;
+ const uint64_t x = i;
+ const uint64_t y = j;
+ switch (condition) {
+ case kCondEQ:
+ comparison = new (&allocator) HEqual(op1, op2);
+ expected_result = (i == j);
+ break;
+ case kCondNE:
+ comparison = new (&allocator) HNotEqual(op1, op2);
+ expected_result = (i != j);
+ break;
+ case kCondLT:
+ comparison = new (&allocator) HLessThan(op1, op2);
+ expected_result = (i < j);
+ break;
+ case kCondLE:
+ comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ expected_result = (i <= j);
+ break;
+ case kCondGT:
+ comparison = new (&allocator) HGreaterThan(op1, op2);
+ expected_result = (i > j);
+ break;
+ case kCondGE:
+ comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ expected_result = (i >= j);
+ break;
+ case kCondB:
+ comparison = new (&allocator) HBelow(op1, op2);
+ expected_result = (x < y);
+ break;
+ case kCondBE:
+ comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ expected_result = (x <= y);
+ break;
+ case kCondA:
+ comparison = new (&allocator) HAbove(op1, op2);
+ expected_result = (x > y);
+ break;
+ case kCondAE:
+ comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ expected_result = (x >= y);
+ break;
+ }
+ block->AddInstruction(comparison);
+ block->AddInstruction(new (&allocator) HReturn(comparison));
+
+ auto hook_before_codegen = [](HGraph*) {
+ };
+ RunCodeOptimized(graph, hook_before_codegen, true, expected_result);
+}
+
+TEST(CodegenTest, ComparisonsInt) {
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimInt);
+ TestComparison(kCondNE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondB, i, j, Primitive::kPrimInt);
+ TestComparison(kCondBE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondA, i, j, Primitive::kPrimInt);
+ TestComparison(kCondAE, i, j, Primitive::kPrimInt);
+ }
+ }
+}
+
+TEST(CodegenTest, ComparisonsLong) {
+ // TODO: make MIPS work for long
+ if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
+ return;
+ }
+
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimLong);
+ TestComparison(kCondNE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondB, i, j, Primitive::kPrimLong);
+ TestComparison(kCondBE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondA, i, j, Primitive::kPrimLong);
+ TestComparison(kCondAE, i, j, Primitive::kPrimLong);
+ }
+ }
+}
+
} // namespace art
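
A note on the helper above: for the unsigned conditions (HBelow, HBelowOrEqual, HAbove, HAboveOrEqual) the expected result is computed on the operands reinterpreted as uint64_t. A minimal sketch of why that matters (editor's illustration, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  int64_t i = -1, j = 1;
  uint64_t x = static_cast<uint64_t>(i);  // 0xFFFFFFFFFFFFFFFF
  uint64_t y = static_cast<uint64_t>(j);  // 1
  assert(i < j);     // signed:   kCondLT is taken
  assert(!(x < y));  // unsigned: kCondB is not taken, -1 is the largest value
  return 0;
}
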
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index f54547534f..4abe5e953c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -206,7 +206,9 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
- return vixl::Assembler::IsImmAddSub(value);
+ // If `value` does not fit but `-value` does, VIXL will automatically use
+ // the 'opposite' instruction.
+ return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
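
The check above relies on a VIXL encoding rule; the sketch below is a simplified model of vixl::Assembler::IsImmAddSub (an assumption about its behavior, not the real implementation):

#include <cassert>
#include <cstdint>

// ADD/SUB immediates are 12 bits, optionally shifted left by 12.
bool IsImmAddSubModel(int64_t value) {
  uint64_t v = static_cast<uint64_t>(value);
  return (v >> 12) == 0 || (((v & 0xfff) == 0) && (v >> 24) == 0);
}

int main() {
  // -8 is not encodable, but 8 is: VIXL rewrites ADD rd, rn, #-8
  // as SUB rd, rn, #8, which is why checking -value as well is sound.
  assert(!IsImmAddSubModel(-8) && IsImmAddSubModel(8));
  return 0;
}
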
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/constant_area_fixups_x86.cc
new file mode 100644
index 0000000000..c3470002c5
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_area_fixups_x86.h"
+
+namespace art {
+namespace x86 {
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if (value != nullptr && Primitive::IsFloatingPointType(value->GetType())) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ // We need to replace the HPackedSwitch with an HX86PackedSwitch in order to
+ // address the constant area.
+ InitializeConstantAreaPointer(switch_insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The HX86ComputeBaseMethodAddress generated in the entry block, needed as
+ // an input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace x86
+} // namespace art
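
A design note on InitializeConstantAreaPointer above: the base instruction is created at most once and placed in the entry block so it dominates every use. A minimal sketch of that lazy-initialization pattern (editor's illustration, not ART code):

#include <cassert>

struct BaseInsn { int uses = 0; };

class LazyBase {
 public:
  // Create the shared base on first request; every later caller reuses it.
  BaseInsn* GetOrCreate() {
    if (base_ == nullptr) {
      base_ = new BaseInsn();  // ART allocates this in the graph arena instead
    }
    base_->uses++;
    return base_;
  }
 private:
  BaseInsn* base_ = nullptr;
};

int main() {
  LazyBase pass;
  BaseInsn* a = pass.GetOrCreate();
  BaseInsn* b = pass.GetOrCreate();
  assert(a == b && a->uses == 2);  // one base feeds all constant-table loads
  delete a;
  return 0;
}
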
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b2e222f1a9..2feb75cc9f 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -569,7 +569,7 @@ TEST(ConstantFolding, IntConstantFoldingAndJumps) {
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 5,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 4,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 8,
Instruction::RETURN | 2 << 8);
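
The same substitution appears in dead_code_elimination_test.cc just below; both avoid left-shifting a negative signed value, which was undefined behavior in the C++ standards of the time (defined only from C++20 on). A sketch of the equivalence (editor's example):

#include <cassert>
#include <cstdint>

int main() {
  // 0xFFFFFFFB is the 32-bit two's-complement bit pattern of -5.
  uint32_t encoded = 0xFFFFFFFBu << 8;  // well-defined unsigned shift
  // The instruction word keeps only 16 bits: branch offset -5 in the high byte.
  assert(static_cast<uint16_t>(encoded) == 0xFB00u);
  return 0;
}
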
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index cf0a4acd4a..2c6a1ef63d 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -140,7 +140,7 @@ TEST(DeadCodeElimination, AdditionsAndInconditionalJumps) {
Instruction::ADD_INT_LIT16 | 1 << 8 | 0 << 12, 3,
Instruction::GOTO | 4 << 8,
Instruction::ADD_INT_LIT16 | 0 << 8 | 2 << 12, 2,
- static_cast<uint16_t>(Instruction::GOTO | -5 << 8),
+ static_cast<uint16_t>(Instruction::GOTO | 0xFFFFFFFB << 8),
Instruction::ADD_INT_LIT16 | 2 << 8 | 1 << 12, 4,
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 56f2718264..aa375f697b 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -34,7 +34,10 @@ TEST(GVNTest, LocalFieldElimination) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -111,7 +114,10 @@ TEST(GVNTest, GlobalFieldElimination) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -188,7 +194,10 @@ TEST(GVNTest, LoopFieldElimination) {
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (&allocator) HBasicBlock(graph);
@@ -328,7 +337,10 @@ TEST(GVNTest, LoopSideEffects) {
inner_loop_body->AddSuccessor(inner_loop_header);
inner_loop_exit->AddSuccessor(outer_loop_header);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimBoolean);
+ HInstruction* parameter = new (&allocator) HParameterValue(graph->GetDexFile(),
+ 0,
+ 0,
+ Primitive::kPrimBoolean);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
outer_loop_header->AddInstruction(new (&allocator) HIf(parameter));
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index cf0f3493fd..8968a44da8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -650,8 +650,7 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
case kCondLE: return lower_value <= upper_value;
case kCondGT: return lower_value > upper_value;
case kCondGE: return lower_value >= upper_value;
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default: LOG(FATAL) << "CONDITION UNREACHABLE";
}
}
return false; // not certain, may be untaken
@@ -680,8 +679,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
(IsIntAndGet(upper_expr, &value) && value >= (min - stride_value - 1));
case kCondGE:
return (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value));
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default:
+ LOG(FATAL) << "CONDITION UNREACHABLE";
}
return false; // not certain, may be infinite
}
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 20492e7152..f16da2a3f7 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -20,7 +20,6 @@
#include "builder.h"
#include "gtest/gtest.h"
#include "induction_var_analysis.h"
-#include "induction_var_range.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -78,7 +77,8 @@ class InductionVarAnalysisTest : public testing::Test {
graph_->SetExitBlock(exit_);
// Provide entry and exit instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot, true);
+ parameter_ = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimNot, true);
entry_->AddInstruction(parameter_);
constant0_ = graph_->GetIntConstant(0);
constant1_ = graph_->GetIntConstant(1);
@@ -522,36 +522,6 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
EXPECT_STREQ("periodic(( - (1)), (0))", GetInductionInfo(neg, 0).c_str());
}
-TEST_F(InductionVarAnalysisTest, FindRange) {
- // Setup:
- // for (int i = 0; i < 100; i++) {
- // k = i << 1;
- // k = k + 1;
- // a[k] = 0;
- // }
- BuildLoopNest(1);
- HInstruction *shl = InsertInstruction(
- new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0);
- InsertLocalStore(induc_, shl, 0);
- HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
- InsertLocalStore(induc_, add, 0);
- HInstruction* store = InsertArrayStore(induc_, 0);
- PerformInductionVarAnalysis();
-
- EXPECT_STREQ("((2) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
-
- InductionVarRange range(iva_);
- InductionVarRange::Value v_min = range.GetMinInduction(store, store->InputAt(1));
- InductionVarRange::Value v_max = range.GetMaxInduction(store, store->InputAt(1));
- ASSERT_TRUE(v_min.is_known);
- EXPECT_EQ(0, v_min.a_constant);
- EXPECT_EQ(1, v_min.b_constant);
- ASSERT_TRUE(v_max.is_known);
- EXPECT_EQ(0, v_max.a_constant);
- EXPECT_EQ(199, v_max.b_constant);
-}
-
TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
// Setup:
// k = 0;
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index db12819060..f4842f9696 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -75,6 +75,13 @@ static InductionVarRange::Value SimplifyMax(InductionVarRange::Value v) {
return v;
}
+static HInstruction* Insert(HBasicBlock* preheader, HInstruction* instruction) {
+ DCHECK(preheader != nullptr);
+ DCHECK(instruction != nullptr);
+ preheader->InsertInstructionBefore(instruction, preheader->GetLastInstruction());
+ return instruction;
+}
+
//
// Public class methods.
//
@@ -94,6 +101,21 @@ InductionVarRange::Value InductionVarRange::GetMaxInduction(HInstruction* contex
return SimplifyMax(GetInduction(context, instruction, /* is_min */ false));
}
+bool InductionVarRange::CanGenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/bool* top_test) {
+ return GenerateCode(context, instruction, nullptr, nullptr, nullptr, nullptr, top_test);
+}
+
+bool InductionVarRange::GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper) {
+ return GenerateCode(context, instruction, graph, block, lower, upper, nullptr);
+}
+
//
// Private class methods.
//
@@ -162,15 +184,15 @@ InductionVarRange::Value InductionVarRange::GetVal(HInductionVarAnalysis::Induct
case HInductionVarAnalysis::kFetch:
return GetFetch(info->fetch, trip, in_body, is_min);
case HInductionVarAnalysis::kTripCountInLoop:
- if (!in_body) {
- return is_min ? Value(0)
- : GetVal(info->op_b, trip, in_body, is_min); // one extra!
+ if (!in_body && !is_min) { // one extra!
+ return GetVal(info->op_b, trip, in_body, is_min);
}
FALLTHROUGH_INTENDED;
case HInductionVarAnalysis::kTripCountInBody:
- if (in_body) {
- return is_min ? Value(0)
- : SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
+ if (is_min) {
+ return Value(0);
+ } else if (in_body) {
+ return SubValue(GetVal(info->op_b, trip, in_body, is_min), Value(1));
}
break;
default:
@@ -256,9 +278,11 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
bool InductionVarRange::GetConstant(HInductionVarAnalysis::InductionInfo* info, int32_t *value) {
Value v_min = GetVal(info, nullptr, false, /* is_min */ true);
Value v_max = GetVal(info, nullptr, false, /* is_min */ false);
- if (v_min.a_constant == 0 && v_max.a_constant == 0 && v_min.b_constant == v_max.b_constant) {
- *value = v_min.b_constant;
- return true;
+ if (v_min.is_known && v_max.is_known) {
+ if (v_min.a_constant == 0 && v_max.a_constant == 0 && v_min.b_constant == v_max.b_constant) {
+ *value = v_min.b_constant;
+ return true;
+ }
}
return false;
}
@@ -326,4 +350,129 @@ InductionVarRange::Value InductionVarRange::MergeVal(Value v1, Value v2, bool is
return Value();
}
+bool InductionVarRange::GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper,
+ /*out*/bool* top_test) {
+ HLoopInformation* loop = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ if (loop != nullptr) {
+ HBasicBlock* header = loop->GetHeader();
+ bool in_body = context->GetBlock() != header;
+ HInductionVarAnalysis::InductionInfo* info = induction_analysis_->LookupInfo(loop, instruction);
+ HInductionVarAnalysis::InductionInfo* trip =
+ induction_analysis_->LookupInfo(loop, header->GetLastInstruction());
+ if (info != nullptr && trip != nullptr) {
+ if (top_test != nullptr) {
+ *top_test = trip->operation != HInductionVarAnalysis::kTripCountInLoop;
+ }
+ return
+ // Success on lower if invariant (lower is not set), or if code can be generated.
+ ((info->induction_class == HInductionVarAnalysis::kInvariant) ||
+ GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+ // And success on upper.
+ GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+ }
+ }
+ return false;
+}
+
+bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph, // when set, code is generated
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ bool in_body,
+ bool is_min) {
+ if (info != nullptr) {
+ Primitive::Type type = Primitive::kPrimInt;
+ HInstruction* opa = nullptr;
+ HInstruction* opb = nullptr;
+ int32_t value = 0;
+ switch (info->induction_class) {
+ case HInductionVarAnalysis::kInvariant:
+ // Invariants.
+ switch (info->operation) {
+ case HInductionVarAnalysis::kAdd:
+ if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HAdd(type, opa, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kSub: // second reversed!
+ if (GenerateCode(info->op_a, trip, graph, block, &opa, in_body, is_min) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HSub(type, opa, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kNeg: // reversed!
+ if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, !is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HNeg(type, opb));
+ }
+ return true;
+ }
+ break;
+ case HInductionVarAnalysis::kFetch:
+ if (graph != nullptr) {
+ *result = info->fetch; // already in HIR
+ }
+ return true;
+ case HInductionVarAnalysis::kTripCountInLoop:
+ if (!in_body && !is_min) { // one extra!
+ return GenerateCode(info->op_b, trip, graph, block, result, in_body, is_min);
+ }
+ FALLTHROUGH_INTENDED;
+ case HInductionVarAnalysis::kTripCountInBody:
+ if (is_min) {
+ if (graph != nullptr) {
+ *result = graph->GetIntConstant(0);
+ }
+ return true;
+ } else if (in_body) {
+ if (GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block,
+ new (graph->GetArena())
+ HSub(type, opb, graph->GetIntConstant(1)));
+ }
+ return true;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case HInductionVarAnalysis::kLinear:
+ // Linear induction a * i + b, for normalized 0 <= i < TC. Restrict to unit stride only
+ // to avoid arithmetic wrap-around situations that are hard to guard against.
+ if (GetConstant(info->op_a, &value)) {
+ if (value == 1 || value == -1) {
+ const bool is_min_a = value == 1 ? is_min : !is_min;
+ if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ *result = Insert(block, new (graph->GetArena()) HAdd(type, opa, opb));
+ }
+ return true;
+ }
+ }
+ }
+ break;
+ default: // TODO(ajcbik): add more cases
+ break;
+ }
+ }
+ return false;
+}
+
} // namespace art
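
The trip-count handling in GetVal and GenerateCode above follows one rule: for a normalized loop 0 <= i < TC, the induction value spans [0, TC-1] inside the body, and the count reaches TC only after the loop (the "one extra" case). A value-level model of the in-loop trip-count bounds (editor's simplification, not ART code):

#include <cassert>
#include <cstdint>

// Models the kTripCountInLoop bounds for a normalized loop.
int32_t TripBound(int32_t trip_count, bool in_body, bool is_min) {
  if (is_min) {
    return 0;                      // both contexts are bounded below by 0
  }
  return in_body ? trip_count - 1  // last body iteration sees TC-1
                 : trip_count;     // after the loop: one extra
}

int main() {
  // Matches the FindRangeConstantTripCount expectations for TC = 1000.
  assert(TripBound(1000, /* in_body */ true,  /* is_min */ false) == 999);
  assert(TripBound(1000, /* in_body */ false, /* is_min */ false) == 1000);
  assert(TripBound(1000, /* in_body */ true,  /* is_min */ true)  == 0);
  return 0;
}
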
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index dbdd2eedac..7fa5a26dce 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -68,6 +68,33 @@ class InductionVarRange {
*/
Value GetMaxInduction(HInstruction* context, HInstruction* instruction);
+ /**
+ * Returns true if range analysis is able to generate code for the lower and upper bound
+ * expressions on the instruction in the given context. Output parameter top_test denotes
+ * whether a top test is needed to protect the trip-count expression evaluation.
+ */
+ bool CanGenerateCode(HInstruction* context, HInstruction* instruction, /*out*/bool* top_test);
+
+ /**
+ * Generates the actual code in the HIR for the lower and upper bound expressions on the
+ * instruction in the given context. Code for the lower and upper bound
+ * expressions is generated in the given block and graph and returned in
+ * lower and upper, respectively.
+ * For a loop invariant, lower is not set.
+ *
+ * For example, given expression x+i with range [0, 5] for i, calling this method
+ * will generate the following sequence:
+ *
+ * block:
+ * lower: add x, 0
+ * upper: add x, 5
+ */
+ bool GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper);
+
private:
//
// Private helper methods.
@@ -102,6 +129,27 @@ class InductionVarRange {
static Value DivValue(Value v1, Value v2);
static Value MergeVal(Value v1, Value v2, bool is_min);
+ /**
+ * Generates code for lower/upper expression in the HIR. Returns true on success.
+ * With graph == nullptr, the method can be used to determine if code generation
+ * would be successful without generating actual code yet.
+ */
+ bool GenerateCode(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper,
+ bool* top_test);
+
+ static bool GenerateCode(HInductionVarAnalysis::InductionInfo* info,
+ HInductionVarAnalysis::InductionInfo* trip,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** result,
+ bool in_body,
+ bool is_min);
+
/** Results of prior induction variable analysis. */
HInductionVarAnalysis *induction_analysis_;
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 4497a884d9..8fbc59fb4a 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -49,12 +49,52 @@ class InductionVarRangeTest : public testing::Test {
/** Constructs bare minimum graph. */
void BuildGraph() {
graph_->SetNumberOfVRegs(1);
- HBasicBlock* entry_block = new (&allocator_) HBasicBlock(graph_);
- HBasicBlock* exit_block = new (&allocator_) HBasicBlock(graph_);
- graph_->AddBlock(entry_block);
- graph_->AddBlock(exit_block);
- graph_->SetEntryBlock(entry_block);
- graph_->SetExitBlock(exit_block);
+ entry_block_ = new (&allocator_) HBasicBlock(graph_);
+ exit_block_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetEntryBlock(entry_block_);
+ graph_->SetExitBlock(exit_block_);
+ }
+
+ /** Constructs loop with given upper bound. */
+ void BuildLoop(HInstruction* upper) {
+ // Control flow.
+ loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_preheader_);
+ HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_header);
+ HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_body);
+ entry_block_->AddSuccessor(loop_preheader_);
+ loop_preheader_->AddSuccessor(loop_header);
+ loop_header->AddSuccessor(loop_body);
+ loop_header->AddSuccessor(exit_block_);
+ loop_body->AddSuccessor(loop_header);
+ // Instructions.
+ HLocal* induc = new (&allocator_) HLocal(0);
+ entry_block_->AddInstruction(induc);
+ loop_preheader_->AddInstruction(
+ new (&allocator_) HStoreLocal(induc, graph_->GetIntConstant(0))); // i = 0
+ loop_preheader_->AddInstruction(new (&allocator_) HGoto());
+ HInstruction* load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
+ loop_header->AddInstruction(load);
+ condition_ = new (&allocator_) HLessThan(load, upper);
+ loop_header->AddInstruction(condition_);
+ loop_header->AddInstruction(new (&allocator_) HIf(condition_)); // i < u
+ load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
+ loop_body->AddInstruction(load);
+ increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, load, graph_->GetIntConstant(1));
+ loop_body->AddInstruction(increment_);
+ loop_body->AddInstruction(new (&allocator_) HStoreLocal(induc, increment_)); // i++
+ loop_body->AddInstruction(new (&allocator_) HGoto());
+ exit_block_->AddInstruction(new (&allocator_) HReturnVoid());
+ }
+
+ /** Performs induction variable analysis. */
+ void PerformInductionVarAnalysis() {
+ ASSERT_TRUE(graph_->TryBuildingSsa());
+ iva_->Run();
}
/** Constructs an invariant. */
@@ -146,15 +186,20 @@ class InductionVarRangeTest : public testing::Test {
ArenaPool pool_;
ArenaAllocator allocator_;
HGraph* graph_;
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+ HBasicBlock* loop_preheader_;
HInductionVarAnalysis* iva_;
- // Two dummy instructions.
+ // Instructions.
+ HInstruction* condition_;
+ HInstruction* increment_;
HReturnVoid x_;
HReturnVoid y_;
};
//
-// The actual InductionVarRange tests.
+// Tests on static methods.
//
TEST_F(InductionVarRangeTest, GetMinMaxNull) {
@@ -349,4 +394,83 @@ TEST_F(InductionVarRangeTest, MaxValue) {
ExpectEqual(Value(), MaxValue(Value(55), Value(&y_, 1, -50)));
}
+//
+// Tests on instance methods.
+//
+
+TEST_F(InductionVarRangeTest, FindRangeConstantTripCount) {
+ BuildLoop(graph_->GetIntConstant(1000));
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ // In context of header: known.
+ ExpectEqual(Value(0), range.GetMinInduction(condition_, condition_->InputAt(0)));
+ ExpectEqual(Value(1000), range.GetMaxInduction(condition_, condition_->InputAt(0)));
+
+ // In context of loop-body: known.
+ ExpectEqual(Value(0), range.GetMinInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(999), range.GetMaxInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(1), range.GetMinInduction(increment_, increment_));
+ ExpectEqual(Value(1000), range.GetMaxInduction(increment_, increment_));
+}
+
+TEST_F(InductionVarRangeTest, FindRangeSymbolicTripCount) {
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ entry_block_->AddInstruction(parameter);
+ BuildLoop(parameter);
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ // In context of header: full range unknown.
+ ExpectEqual(Value(0), range.GetMinInduction(condition_, condition_->InputAt(0)));
+ ExpectEqual(Value(), range.GetMaxInduction(condition_, condition_->InputAt(0)));
+
+ // In context of loop-body: known.
+ ExpectEqual(Value(0), range.GetMinInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(parameter, 1, -1), range.GetMaxInduction(increment_, condition_->InputAt(0)));
+ ExpectEqual(Value(1), range.GetMinInduction(increment_, increment_));
+ ExpectEqual(Value(parameter, 1, 0), range.GetMaxInduction(increment_, increment_));
+}
+
+TEST_F(InductionVarRangeTest, CodeGeneration) {
+ HInstruction* parameter = new (&allocator_) HParameterValue(
+ graph_->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ entry_block_->AddInstruction(parameter);
+ BuildLoop(parameter);
+ PerformInductionVarAnalysis();
+ InductionVarRange range(iva_);
+
+ HInstruction* lower = nullptr;
+ HInstruction* upper = nullptr;
+ bool top_test = false;
+
+ // Can generate code in context of loop-body only.
+ EXPECT_FALSE(range.CanGenerateCode(condition_, condition_->InputAt(0), &top_test));
+ ASSERT_TRUE(range.CanGenerateCode(increment_, condition_->InputAt(0), &top_test));
+ EXPECT_TRUE(top_test);
+
+ // Generates code.
+ EXPECT_TRUE(range.GenerateCode(
+ increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper));
+
+ // Verify lower is 0+0.
+ ASSERT_TRUE(lower != nullptr);
+ ASSERT_TRUE(lower->IsAdd());
+ ASSERT_TRUE(lower->InputAt(0)->IsIntConstant());
+ EXPECT_EQ(0, lower->InputAt(0)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(lower->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(0, lower->InputAt(1)->AsIntConstant()->GetValue());
+
+ // Verify upper is (V-1)+0.
+ ASSERT_TRUE(upper != nullptr);
+ ASSERT_TRUE(upper->IsAdd());
+ ASSERT_TRUE(upper->InputAt(0)->IsSub());
+ EXPECT_TRUE(upper->InputAt(0)->InputAt(0)->IsParameterValue());
+ ASSERT_TRUE(upper->InputAt(0)->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(1, upper->InputAt(0)->InputAt(1)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(upper->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
+}
+
} // namespace art
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index f3b5f08c7e..e2aca3091f 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -495,6 +495,9 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
number_of_inlined_instructions_ += number_of_instructions;
HInstruction* return_replacement = callee_graph->InlineInto(graph_, invoke_instruction);
+ if (return_replacement != nullptr) {
+ DCHECK_EQ(graph_, return_replacement->GetBlock()->GetGraph());
+ }
// When merging the graph we might create a new NullConstant in the caller graph which does
// not have the chance to be typed. We assign the correct type here so that we can keep the
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 839cf44632..b97dc1a511 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -73,6 +73,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
void VisitFakeString(HFakeString* fake_string) OVERRIDE;
void VisitInvoke(HInvoke* invoke) OVERRIDE;
+ void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
@@ -618,13 +619,15 @@ void InstructionSimplifierVisitor::VisitLessThanOrEqual(HLessThanOrEqual* condit
VisitCondition(condition);
}
+// TODO: unsigned comparisons too?
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
// This simplification is currently supported on x86, x86_64, ARM and ARM64.
- // TODO: Implement it for MIPS64.
+ // TODO: Implement it for MIPS and MIPS64.
InstructionSet instruction_set = GetGraph()->GetInstructionSet();
- if (instruction_set == kMips64) {
+ if (instruction_set == kMips || instruction_set == kMips64) {
return;
}
@@ -1149,4 +1152,16 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
}
}
+void InstructionSimplifierVisitor::VisitDeoptimize(HDeoptimize* deoptimize) {
+ HInstruction* cond = deoptimize->InputAt(0);
+ if (cond->IsConstant()) {
+ if (cond->AsIntConstant()->IsZero()) {
+ // Never deopt: instruction can be removed.
+ deoptimize->GetBlock()->RemoveInstruction(deoptimize);
+ } else {
+ // Always deopt.
+ }
+ }
+}
+
} // namespace art
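
A sketch of the folding rule VisitDeoptimize implements above: a guard on a constant condition is statically decidable (editor's model; the real pass removes the HDeoptimize node from its block, while a constant-true guard is left in place):

#include <cassert>

enum class GuardAction { kRemove, kKeep };

// Constant false: the guard can never fire, so it is dead.
// Constant true: it always deopts; the instruction stays.
GuardAction FoldDeoptGuard(bool is_constant, int condition_value) {
  if (!is_constant) {
    return GuardAction::kKeep;
  }
  return condition_value == 0 ? GuardAction::kRemove : GuardAction::kKeep;
}

int main() {
  assert(FoldDeoptGuard(true, 0) == GuardAction::kRemove);  // never deopt
  assert(FoldDeoptGuard(true, 1) == GuardAction::kKeep);    // always deopt
  assert(FoldDeoptGuard(false, 0) == GuardAction::kKeep);   // unknown statically
  return 0;
}
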
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 764a11475f..56c4177b29 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -43,6 +43,93 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
+#define __ codegen->GetAssembler()->
+
+static void MoveFromReturnRegister(Location trg,
+ Primitive::Type type,
+ CodeGeneratorMIPS64* codegen) {
+ if (!trg.IsValid()) {
+ DCHECK_EQ(type, Primitive::kPrimVoid);
+ return;
+ }
+
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
+ GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
+ if (trg_reg != V0) {
+ __ Move(V0, trg_reg);
+ }
+ } else {
+ FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
+ if (trg_reg != F0) {
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(F0, trg_reg);
+ } else {
+ __ MovD(F0, trg_reg);
+ }
+ }
+ }
+}
+
+static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
+ InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
+}
+
+// Slow-path for fallback (calling the managed code to handle the
+// intrinsic) in an intrinsified call. This will copy the arguments
+// into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations
+// given by the invoke's location summary. If an intrinsic
+// modifies those locations before a slowpath call, they must be
+// restored!
+class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
+
+ __ Bind(GetEntryLabel());
+
+ SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+ MoveArguments(invoke_, codegen);
+
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ Location::RegisterLocation(A0));
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+ } else {
+ UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+ UNREACHABLE();
+ }
+
+ // Copy the result back to the expected output.
+ Location out = invoke_->GetLocations()->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+ }
+
+ RestoreLiveRegisters(codegen, invoke_->GetLocations());
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+
+ private:
+ // The instruction where this slow path is happening.
+ HInvoke* const invoke_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
+};
+
+#undef __
+
bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
@@ -185,7 +272,7 @@ void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}
-static void GenCountZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(LocationSummary* locations,
+                                     bool is64bit,
+                                     Mips64Assembler* assembler) {
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -202,7 +289,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -211,7 +298,166 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
+static void GenNumberOfTrailingZeroes(LocationSummary* locations,
+                                      bool is64bit,
+                                      Mips64Assembler* assembler) {
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ if (is64bit) {
+ __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>());
+ __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ } else {
+ __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16);
+ __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ }
+}
+
+// int java.lang.Integer.numberOfTrailingZeros(int i)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), false, GetAssembler());
+}
+
+// int java.lang.Long.numberOfTrailingZeros(long i)
+void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
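
A note on GenNumberOfTrailingZeroes above: MIPS64 has no count-trailing-zeroes instruction, so the code reverses the bits (byte reorder plus per-byte bit swap) and counts leading zeroes instead. An editor's demo of the identity, using compiler builtins:

#include <cassert>
#include <cstdint>

// Portable 32-bit bit reversal (the MIPS code uses ROTR+WSBH+BITSWAP).
uint32_t BitReverse32(uint32_t x) {
  uint32_t r = 0;
  for (int i = 0; i < 32; ++i) {
    r |= ((x >> i) & 1u) << (31 - i);
  }
  return r;
}

int main() {
  uint32_t x = 0x00000008u;  // trailing zeroes: 3
  // ctz(x) == clz(bit_reverse(x)); the builtins require x != 0.
  assert(__builtin_ctz(x) == __builtin_clz(BitReverse32(x)));
  return 0;
}
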
+static void GenRotateRight(HInvoke* invoke,
+ Primitive::Type type,
+ Mips64Assembler* assembler) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
+ if (type == Primitive::kPrimInt) {
+ shift &= 0x1f;
+ __ Rotr(out, in, shift);
+ } else {
+ shift &= 0x3f;
+ if (shift < 32) {
+ __ Drotr(out, in, shift);
+ } else {
+ shift &= 0x1f;
+ __ Drotr32(out, in, shift);
+ }
+ }
+ } else {
+ GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt) {
+ __ Rotrv(out, in, shamt);
+ } else {
+ __ Drotrv(out, in, shamt);
+ }
+ }
+}
+
+// int java.lang.Integer.rotateRight(int i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+// int java.lang.Long.rotateRight(long i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler());
+}
+
+static void GenRotateLeft(HInvoke* invoke,
+ Primitive::Type type,
+ Mips64Assembler* assembler) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ int32_t shift = -static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
+ if (type == Primitive::kPrimInt) {
+ shift &= 0x1f;
+ __ Rotr(out, in, shift);
+ } else {
+ shift &= 0x3f;
+ if (shift < 32) {
+ __ Drotr(out, in, shift);
+ } else {
+ shift &= 0x1f;
+ __ Drotr32(out, in, shift);
+ }
+ }
+ } else {
+ GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt) {
+ __ Subu(TMP, ZERO, shamt);
+ __ Rotrv(out, in, TMP);
+ } else {
+ __ Dsubu(TMP, ZERO, shamt);
+ __ Drotrv(out, in, TMP);
+ }
+ }
+}
+
+// int java.lang.Integer.rotateLeft(int i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+// int java.lang.Long.rotateLeft(long i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
+ GenRotateLeft(invoke, Primitive::kPrimLong, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
@@ -765,6 +1011,505 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile,
+ CodeGeneratorMIPS64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister trg = locations->Out().AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ __ Lw(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimNot:
+ __ Lwu(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Ld(trg, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+}
+
+// int sun.misc.Unsafe.getInt(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_);
+}
+
+// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_);
+}
+
+// long sun.misc.Unsafe.getLong(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_);
+}
+
+// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObject(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_);
+}
+
+static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+ Primitive::Type type,
+ bool is_volatile,
+ bool is_ordered,
+ CodeGeneratorMIPS64* codegen) {
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile || is_ordered) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ __ Sw(value, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Sd(value, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+
+ if (type == Primitive::kPrimNot) {
+ codegen->MarkGCCard(base, value);
+ }
+}
+
+// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_);
+}
+
+// char java.lang.String.charAt(int index)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Mips64Assembler* assembler = GetAssembler();
+
+ // Location of the reference to the data array.
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of the count field.
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ // TODO: Maybe we can support range check elimination. Overall,
+ // though, I think it's not worth the cost.
+ // TODO: For simplicity, the index parameter is requested in a
+ // register, so different from Quick we will not optimize the
+ // code for constants (which would save a register).
+
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load the string size
+ __ Lw(TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Revert to the slow path if idx is too large or negative.
+ __ Bgeuc(idx, TMP, slow_path->GetEntryLabel());
+
+ // out = obj[2*idx].
+ __ Sll(TMP, idx, 1); // idx * 2
+ __ Daddu(TMP, TMP, obj); // Address of char at location idx
+ __ Lhu(out, TMP, value_offset); // Load char at location idx
+
+ __ Bind(slow_path->GetExitLabel());
+}
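
The single Bgeuc above implements both bounds checks at once: reinterpreted as unsigned, a negative index compares greater than any valid string length. An editor's sketch of the trick:

#include <cassert>
#include <cstdint>

// One unsigned compare covers both idx < 0 and idx >= length.
bool IndexOutOfRange(int32_t idx, int32_t length) {
  return static_cast<uint32_t>(idx) >= static_cast<uint32_t>(length);
}

int main() {
  assert(IndexOutOfRange(-1, 10));  // 0xFFFFFFFF >= 10: caught without a sign test
  assert(IndexOutOfRange(10, 10));  // one past the end of the range
  assert(!IndexOutOfRange(0, 10));
  return 0;
}
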
+
+// int java.lang.String.compareTo(String anotherString)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(argument, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize,
+ pStringCompareTo).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ Mips64Assembler* assembler,
+ CodeGeneratorMIPS64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF: either a slow-path check when the
+ // value is not known statically, or a direct dispatch to the slow path
+ // when we have a constant that is known to be out of range.
+ SlowPathCodeMIPS64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
+ // This always needs the slow path. We could branch to it directly,
+ // but this case should be rare, so for simplicity just emit the
+ // full slow path and branch to it unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
+ __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // Slow path if not a UTF-16 code unit.
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, A2);
+ // Start-index = 0.
+ __ Clear(tmp_reg);
+ } else {
+ __ Slt(TMP, A2, ZERO); // if fromIndex < 0
+ __ Seleqz(A2, A2, TMP); // fromIndex = 0
+ }
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pIndexOf).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
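The constant/non-constant split above boils down to the following decision, shown as a hedged sketch (the helper name is made up for illustration; is_constant and ch mirror invoke->InputAt(1)):

// Sketch of the code-point dispatch in GenerateStringIndexOf.
void IndexOfDispatchSketch(bool is_constant, int32_t ch) {
  if (is_constant && ch > 0xFFFF) {
    // Constant known to be a supplementary code point: branch to the
    // slow path unconditionally and skip the fast stub entirely.
  } else if (!is_constant) {
    // Runtime check: if ch > 0xFFFF take the slow path, else fall
    // through to the pIndexOf stub (the Bltuc above).
  }
  // In-range constants fall straight through to the stub.
}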
+
+// int java.lang.String.indexOf(int ch)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention, so it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+
+ // Need a temp register for the slow-path code-point compare, and to pass start-index = 0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+// int java.lang.String.indexOf(int ch, int fromIndex)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention, so it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+// java.lang.String.String(byte[] bytes)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(byte_array, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromBytes).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// java.lang.String.String(char[] value)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromChars).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+}
+
+// java.lang.String.String(String original)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromString).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -776,38 +1521,10 @@ void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePut)
-UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCharAt)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringEquals)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
-UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index e83aebb5be..8a7aded935 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1837,6 +1837,14 @@ void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks several libcore tests); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
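Heap reference poisoning stores compressed references in a transformed form so that accidental unpoisoned uses fault early; a CAS on an object field must therefore poison both the expected and new values before the hardware compare-exchange. A minimal sketch of the idea, assuming negation as the poisoning transform (ART's actual transform and types may differ):

#include <atomic>
#include <cstdint>

// Assumed transform for illustration: poisoned ref = -ref.
uint32_t PoisonRef(uint32_t ref) { return 0u - ref; }

bool CasObjectFieldSketch(std::atomic<uint32_t>* field,
                          uint32_t expected_ref, uint32_t new_ref) {
  // Both operands must be poisoned, or the compare never matches
  // the poisoned value stored in the heap.
  uint32_t expected = PoisonRef(expected_ref);
  return field->compare_exchange_strong(expected, PoisonRef(new_ref));
}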
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index e0d88a91d3..7a1d92d2fe 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -1909,6 +1909,14 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASLong(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
+ // The UnsafeCASObject intrinsic does not always work when heap
+ // poisoning is enabled (it breaks several libcore tests); turn it
+ // off temporarily as a quick fix.
+ // TODO(rpl): Fix it and turn it back on.
+ if (kPoisonHeapReferences) {
+ return;
+ }
+
CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 558892d01c..a036bd5aa9 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -61,7 +61,7 @@ class LICMTest : public testing::Test {
loop_body_->AddSuccessor(loop_header_);
// Provide boiler-plate instructions.
- parameter_ = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
+ parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry_->AddInstruction(parameter_);
constant_ = graph_->GetIntConstant(42);
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 24a89bca4e..98c3096cae 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -606,8 +606,23 @@ static void UpdateInputsUsers(HInstruction* instruction) {
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
- InsertInstructionBefore(replacement, initial);
- initial->ReplaceWith(replacement);
+ if (initial->IsControlFlow()) {
+ // We can only replace a control flow instruction with another control flow instruction.
+ DCHECK(replacement->IsControlFlow());
+ DCHECK_EQ(replacement->GetId(), -1);
+ DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid);
+ DCHECK_EQ(initial->GetBlock(), this);
+ DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid);
+ DCHECK(initial->GetUses().IsEmpty());
+ DCHECK(initial->GetEnvUses().IsEmpty());
+ replacement->SetBlock(this);
+ replacement->SetId(GetGraph()->GetNextInstructionId());
+ instructions_.InsertInstructionBefore(replacement, initial);
+ UpdateInputsUsers(replacement);
+ } else {
+ InsertInstructionBefore(replacement, initial);
+ initial->ReplaceWith(replacement);
+ }
RemoveInstruction(initial);
}
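The control-flow branch exists because HInstruction::ReplaceWith rewires uses, and a control-flow instruction has none; the replacement must instead be spliced into the block and given a fresh id. A hypothetical caller (assuming the ART graph headers), mirroring the HX86PackedSwitch rewrite introduced later in this change:

// Hypothetical helper: swap one control-flow instruction for another.
void ReplaceSwitchSketch(HBasicBlock* block,
                         HPackedSwitch* old_switch,
                         HX86PackedSwitch* new_switch) {
  // No uses to rewire; the block splices new_switch in place of
  // old_switch and assigns it an instruction id.
  block->ReplaceAndRemoveInstructionWith(old_switch, new_switch);
}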
@@ -1576,7 +1591,6 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
// Replace the invoke with the return value of the inlined graph.
if (last->IsReturn()) {
return_value = last->InputAt(0);
- invoke->ReplaceWith(return_value);
} else {
DCHECK(last->IsReturnVoid());
}
@@ -1624,10 +1638,6 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
}
}
- if (return_value != nullptr) {
- invoke->ReplaceWith(return_value);
- }
-
// Update the meta information surrounding blocks:
// (1) the graph they are now in,
// (2) the reverse post order of that graph,
@@ -1697,20 +1707,21 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t parameter_index = 0;
for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
+ HInstruction* replacement = nullptr;
if (current->IsNullConstant()) {
- current->ReplaceWith(outer_graph->GetNullConstant(current->GetDexPc()));
+ replacement = outer_graph->GetNullConstant(current->GetDexPc());
} else if (current->IsIntConstant()) {
- current->ReplaceWith(outer_graph->GetIntConstant(
- current->AsIntConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetIntConstant(
+ current->AsIntConstant()->GetValue(), current->GetDexPc());
} else if (current->IsLongConstant()) {
- current->ReplaceWith(outer_graph->GetLongConstant(
- current->AsLongConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetLongConstant(
+ current->AsLongConstant()->GetValue(), current->GetDexPc());
} else if (current->IsFloatConstant()) {
- current->ReplaceWith(outer_graph->GetFloatConstant(
- current->AsFloatConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetFloatConstant(
+ current->AsFloatConstant()->GetValue(), current->GetDexPc());
} else if (current->IsDoubleConstant()) {
- current->ReplaceWith(outer_graph->GetDoubleConstant(
- current->AsDoubleConstant()->GetValue(), current->GetDexPc()));
+ replacement = outer_graph->GetDoubleConstant(
+ current->AsDoubleConstant()->GetValue(), current->GetDexPc());
} else if (current->IsParameterValue()) {
if (kIsDebugBuild
&& invoke->IsInvokeStaticOrDirect()
@@ -1720,13 +1731,25 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
size_t last_input_index = invoke->InputCount() - 1;
DCHECK(parameter_index != last_input_index);
}
- current->ReplaceWith(invoke->InputAt(parameter_index++));
+ replacement = invoke->InputAt(parameter_index++);
} else if (current->IsCurrentMethod()) {
- current->ReplaceWith(outer_graph->GetCurrentMethod());
+ replacement = outer_graph->GetCurrentMethod();
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
entry_block_->RemoveInstruction(current);
}
+ if (replacement != nullptr) {
+ current->ReplaceWith(replacement);
+ // If current is the return value, update the return value to point at the replacement.
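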
+ if (current == return_value) {
+ DCHECK_EQ(entry_block_, return_value->GetBlock());
+ return_value = replacement;
+ }
+ }
+ }
+
+ if (return_value != nullptr) {
+ invoke->ReplaceWith(return_value);
}
// Finally remove the invoke from the caller.
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 82909c41b6..7cf6339b6e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -81,12 +81,19 @@ static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
static constexpr uint32_t kNoDexPc = -1;
enum IfCondition {
- kCondEQ,
- kCondNE,
- kCondLT,
- kCondLE,
- kCondGT,
- kCondGE,
+ // All types.
+ kCondEQ, // ==
+ kCondNE, // !=
+ // Signed integers and floating-point numbers.
+ kCondLT, // <
+ kCondLE, // <=
+ kCondGT, // >
+ kCondGE, // >=
+ // Unsigned integers.
+ kCondB, // <
+ kCondBE, // <=
+ kCondA, // >
+ kCondAE, // >=
};
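The split matters because the same bit pattern orders differently under signed and unsigned comparison; a two-line check of the distinction:

// -1 as int32_t is 0xFFFFFFFF as uint32_t, so the orderings disagree:
static_assert(-1 < 1, "signed: kCondLT holds");
static_assert(0xFFFFFFFFu > 1u, "unsigned: kCondA holds for the same bits");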
class HInstructionList : public ValueObject {
@@ -988,11 +995,15 @@ class HLoopInformationOutwardIterator : public ValueObject {
};
#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
+ M(Above, Condition) \
+ M(AboveOrEqual, Condition) \
M(Add, BinaryOperation) \
M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
+ M(Below, Condition) \
+ M(BelowOrEqual, Condition) \
M(BooleanNot, UnaryOperation) \
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
@@ -1070,11 +1081,14 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
+
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
- M(X86LoadFromConstantTable, Instruction)
+ M(X86LoadFromConstantTable, Instruction) \
+ M(X86PackedSwitch, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1082,6 +1096,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1787,8 +1802,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return true;
}
- virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
- UNUSED(obj);
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const {
return false;
}
@@ -1905,16 +1919,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionTypeEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
@@ -2477,8 +2489,7 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2548,8 +2559,7 @@ class HBinaryOperation : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2653,8 +2663,6 @@ class HEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x == y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2675,6 +2683,8 @@ class HEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x == y; }
+
DISALLOW_COPY_AND_ASSIGN(HEqual);
};
@@ -2685,8 +2695,6 @@ class HNotEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x != y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2707,6 +2715,8 @@ class HNotEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x != y; }
+
DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
@@ -2715,8 +2725,6 @@ class HLessThan : public HCondition {
HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x < y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2737,6 +2745,8 @@ class HLessThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
@@ -2745,8 +2755,6 @@ class HLessThanOrEqual : public HCondition {
HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x <= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2767,6 +2775,8 @@ class HLessThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
@@ -2775,8 +2785,6 @@ class HGreaterThan : public HCondition {
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x > y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2797,6 +2805,8 @@ class HGreaterThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
@@ -2805,8 +2815,6 @@ class HGreaterThanOrEqual : public HCondition {
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x >= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2827,9 +2835,138 @@ class HGreaterThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
+class HBelow : public HCondition {
+ public:
+ HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Below);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelow);
+};
+
+class HBelowOrEqual : public HCondition {
+ public:
+ HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(BelowOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
+};
+
+class HAbove : public HCondition {
+ public:
+ HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Above);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAbove);
+};
+
+class HAboveOrEqual : public HCondition {
+ public:
+ HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(AboveOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
+};
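The Evaluate overloads let the constant folder compute these unsigned conditions at compile time; a hedged sketch of the cast-then-compare they perform on int constants:

#include <cstdint>

// What HBelow::Evaluate computes for two int constants (sketch).
int32_t EvaluateBelowSketch(int32_t x, int32_t y) {
  // Cast to unsigned first, then compare; the result becomes a 0/1
  // HIntConstant in the graph.
  return static_cast<uint32_t>(x) < static_cast<uint32_t>(y) ? 1 : 0;
}
// EvaluateBelowSketch(-1, 1) == 0, whereas signed HLessThan gives 1.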
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
@@ -3253,8 +3390,7 @@ class HInvokeStaticOrDirect : public HInvoke {
target_method_(target_method),
dispatch_info_(dispatch_info) {}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
@@ -3692,8 +3828,7 @@ class HDivZeroCheck : public HExpression<1> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3927,24 +4062,31 @@ class HXor : public HBinaryOperation {
// the calling convention.
class HParameterValue : public HExpression<0> {
public:
- HParameterValue(uint8_t index,
+ HParameterValue(const DexFile& dex_file,
+ uint16_t type_index,
+ uint8_t index,
Primitive::Type parameter_type,
bool is_this = false)
: HExpression(parameter_type, SideEffects::None(), kNoDexPc),
+ dex_file_(dex_file),
+ type_index_(type_index),
index_(index),
is_this_(is_this),
can_be_null_(!is_this) {}
+ const DexFile& GetDexFile() const { return dex_file_; }
+ uint16_t GetTypeIndex() const { return type_index_; }
uint8_t GetIndex() const { return index_; }
+ bool IsThis() const { return is_this_; }
bool CanBeNull() const OVERRIDE { return can_be_null_; }
void SetCanBeNull(bool can_be_null) { can_be_null_ = can_be_null; }
- bool IsThis() const { return is_this_; }
-
DECLARE_INSTRUCTION(ParameterValue);
private:
+ const DexFile& dex_file_;
+ const uint16_t type_index_;
// The index of this parameter in the parameters list. Must be less
// than HGraph::number_of_in_vregs_.
const uint8_t index_;
@@ -3963,8 +4105,7 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3989,8 +4130,7 @@ class HBooleanNot : public HUnaryOperation {
: HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4028,7 +4168,7 @@ class HTypeConversion : public HExpression<1> {
Primitive::Type GetInputType() const { return GetInput()->GetType(); }
Primitive::Type GetResultType() const { return GetType(); }
- // Required by the x86 and ARM code generators when producing calls
+ // Required by the x86, ARM, MIPS, and MIPS64 code generators when producing calls
// to the runtime.
bool CanBeMoved() const OVERRIDE { return true; }
@@ -4156,8 +4296,7 @@ class HNullCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4302,12 +4441,10 @@ class HArrayGet : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -4355,8 +4492,7 @@ class HArraySet : public HTemplateInstruction<3> {
// Can throw ArrayStoreException.
bool CanThrow() const OVERRIDE { return needs_type_check_; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: Same as for ArrayGet.
return false;
}
@@ -4419,8 +4555,7 @@ class HArrayLength : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4443,8 +4578,7 @@ class HBoundsCheck : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4658,8 +4792,7 @@ class HClinitCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4689,7 +4822,8 @@ class HStaticFieldGet : public HExpression<1> {
uint32_t dex_pc)
: HExpression(
field_type,
- SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
+ SideEffects::FieldReadOfType(field_type, is_volatile),
+ dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
SetRawInputAt(0, cls);
}
@@ -4731,7 +4865,8 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
: HTemplateInstruction(
- SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
+ SideEffects::FieldWriteOfType(field_type, is_volatile),
+ dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
value_can_be_null_(true) {
SetRawInputAt(0, cls);
@@ -5277,7 +5412,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
+ virtual void VisitInstruction(HInstruction* instruction ATTRIBUTE_UNUSED) {}
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 8eeac56ceb..764f5fec5b 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -34,7 +34,8 @@ TEST(Node, RemoveInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
entry->AddInstruction(new (&allocator) HGoto());
@@ -76,8 +77,10 @@ TEST(Node, InsertInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
- HInstruction* parameter2 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
+ HInstruction* parameter2 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
entry->AddInstruction(new (&allocator) HExit());
@@ -102,7 +105,8 @@ TEST(Node, AddInstruction) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
ASSERT_FALSE(parameter->HasUses());
@@ -122,7 +126,8 @@ TEST(Node, ParentEnvironment) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter1 = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
HInstruction* with_environment = new (&allocator) HNullCheck(parameter1, 0);
entry->AddInstruction(parameter1);
entry->AddInstruction(with_environment);
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index f7cc872419..556217bf74 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -62,6 +62,45 @@ class HX86LoadFromConstantTable : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
};
+// X86 version of HPackedSwitch that holds a pointer to the base method address.
+class HX86PackedSwitch : public HTemplateInstruction<2> {
+ public:
+ HX86PackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HX86ComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(1)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(X86PackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
+};
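GetDefaultBlock relies on the successor layout that packed-switch instructions use: successors 0 .. num_entries - 1 are the case targets for start_value .. start_value + num_entries - 1, and the extra successor at index num_entries is the default. A sketch of the lookup a code generator could perform:

// Sketch: resolve the target block for a switch value.
HBasicBlock* CaseTargetSketch(const HX86PackedSwitch* s, int32_t value) {
  int32_t index = value - s->GetStartValue();
  if (index < 0 || index >= s->GetNumEntries()) {
    return s->GetDefaultBlock();  // successors[num_entries].
  }
  return s->GetBlock()->GetSuccessors()[index];
}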
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c7f08066d4..8d7b8a94b7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -361,6 +361,7 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
|| instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kMips
|| instruction_set == kMips64
|| instruction_set == kX86
|| instruction_set == kX86_64;
@@ -666,7 +667,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
- UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
@@ -839,18 +839,26 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
CompiledMethod* method = nullptr;
- const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
- DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
- || CanHandleVerificationFailure(verified_method)) {
- method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
- method_idx, jclass_loader, dex_file, dex_cache);
- } else {
- if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ if (Runtime::Current()->IsAotCompiler()) {
+ const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+ DCHECK(!verified_method->HasRuntimeThrow());
+ if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file)
+ || CanHandleVerificationFailure(verified_method)) {
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
} else {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVerifyAtRuntime);
+ } else {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledClassNotVerified);
+ }
}
+ } else {
+ // This is for the JIT compiler, which has already ensured the class is verified.
+ // We can go straight to compiling.
+ DCHECK(Runtime::Current()->UseJit());
+ method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, jclass_loader, dex_file, dex_cache);
}
if (kIsDebugBuild &&
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f7a7e420bb..26a05da4cb 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -428,12 +428,21 @@ void RTPVisitor::VisitNewArray(HNewArray* instr) {
UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), /* is_exact */ true);
}
+static mirror::Class* GetClassFromDexCache(Thread* self, const DexFile& dex_file, uint16_t type_idx)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::DexCache* dex_cache =
+ Runtime::Current()->GetClassLinker()->FindDexCache(self, dex_file, false);
+ // Get type from dex cache assuming it was populated by the verifier.
+ return dex_cache->GetResolvedType(type_idx);
+}
+
void RTPVisitor::VisitParameterValue(HParameterValue* instr) {
ScopedObjectAccess soa(Thread::Current());
// We check if the existing type is valid: the inliner may have set it.
if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) {
- // TODO: parse the signature and add precise types for the parameters.
- SetClassAsTypeInfo(instr, nullptr, /* is_exact */ false);
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
+ SetClassAsTypeInfo(instr, resolved_class, /* is_exact */ false);
}
}
@@ -479,11 +488,9 @@ void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr)
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
- mirror::DexCache* dex_cache =
- Runtime::Current()->GetClassLinker()->FindDexCache(soa.Self(), instr->GetDexFile(), false);
// Get type from dex cache assuming it was populated by the verifier.
- mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
- // TODO: investigating why we are still getting unresolved classes: b/22821472.
+ mirror::Class* resolved_class =
+ GetClassFromDexCache(soa.Self(), instr->GetDexFile(), instr->GetTypeIndex());
if (resolved_class != nullptr) {
instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
handles_->NewHandle(resolved_class), /* is_exact */ true));
@@ -756,7 +763,9 @@ void ReferenceTypePropagation::ProcessWorklist() {
while (!worklist_.empty()) {
HInstruction* instruction = worklist_.back();
worklist_.pop_back();
- if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
+ bool updated_nullability = UpdateNullability(instruction);
+ bool updated_reference_type = UpdateReferenceTypeInfo(instruction);
+ if (updated_nullability || updated_reference_type) {
AddDependentInstructionsToWorklist(instruction);
}
}
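The rewrite above is a correctness fix, not a style one: || short-circuits, so in the old form UpdateReferenceTypeInfo(instruction) was skipped whenever UpdateNullability(instruction) returned true, and a pending type update could be lost. Forcing both calls into named locals guarantees both side effects run:

// With short-circuiting, b(x) is never called once a(x) is true:
//   if (a(x) || b(x)) { ... }      // b's side effect may be skipped.
// Evaluating both first preserves the side effects:
//   bool ra = a(x);
//   bool rb = b(x);
//   if (ra || rb) { ... }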
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 6fc77721e7..ef22c816a0 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -85,12 +85,13 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
InstructionSet instruction_set) {
- return instruction_set == kArm64
- || instruction_set == kX86_64
+ return instruction_set == kArm
+ || instruction_set == kArm64
+ || instruction_set == kMips
|| instruction_set == kMips64
- || instruction_set == kArm
+ || instruction_set == kThumb2
|| instruction_set == kX86
- || instruction_set == kThumb2;
+ || instruction_set == kX86_64;
}
static bool ShouldProcess(bool processing_core_registers, LiveInterval* interval) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 1511606950..ed5419ee49 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -475,7 +475,8 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
NullHandle<mirror::DexCache> dex_cache;
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -624,7 +625,8 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimNot);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
HBasicBlock* block = new (allocator) HBasicBlock(graph);
@@ -698,7 +700,8 @@ static HGraph* BuildTwoSubs(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* parameter = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* parameter = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(parameter);
HInstruction* constant1 = graph->GetIntConstant(1);
@@ -768,8 +771,10 @@ static HGraph* BuildDiv(ArenaAllocator* allocator,
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* first = new (allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* second = new (allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* first = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* second = new (allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(first);
entry->AddInstruction(second);
@@ -820,10 +825,14 @@ TEST(RegisterAllocatorTest, SpillInactive) {
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
- HInstruction* one = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* two = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* three = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
- HInstruction* four = new (&allocator) HParameterValue(0, Primitive::kPrimInt);
+ HInstruction* one = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* two = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* three = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
+ HInstruction* four = new (&allocator) HParameterValue(
+ graph->GetDexFile(), 0, 0, Primitive::kPrimInt);
entry->AddInstruction(one);
entry->AddInstruction(two);
entry->AddInstruction(three);
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f1c0b9258a..d97a2a40b2 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -301,7 +301,7 @@ class Assembler {
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { UNUSED(format); }
+ virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index b30f7d772e..f1233ca457 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -83,6 +83,15 @@ class AssemblerTest : public testing::Test {
fmt);
}
+ std::string RepeatRRNoDupes(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegistersNoDupes<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
return RepeatTemplatedRegisters<Reg, Reg>(f,
GetRegisters(),
@@ -608,6 +617,45 @@ class AssemblerTest : public testing::Test {
return str;
}
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRegistersNoDupes(void (Ass::*f)(Reg1, Reg2),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
+
+ std::string str;
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ if (reg1 == reg2) continue;
+ (assembler_.get()->*f)(*reg1, *reg2);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
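RepeatTemplatedRegistersNoDupes skips reg1 == reg2 pairs. The motivating case is the Jalr test below: MIPS documents `jalr rd, rs` with rd == rs as UNPREDICTABLE (the link register would be both read and written), so the duplicate pairs must not be emitted. Usage, taken from the test added later in this change:

// Exercise 'jalr' over all register pairs except rd == rs.
DriverStr(RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr,
                          "jalr ${reg1}, ${reg2}"),
          "jalr");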
+
template <typename Reg1, typename Reg2, typename Reg3>
std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2, Reg3),
const std::vector<Reg1*> reg1_registers,
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index b078f3e4cf..00e8995bff 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -335,6 +335,10 @@ void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x04);
}
+void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 1, 0x06);
+}
+
void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x06);
}
@@ -351,6 +355,10 @@ void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
}
+void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a);
+}
+
void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
}
@@ -363,6 +371,10 @@ void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
}
+void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e);
+}
+
void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
}
@@ -375,6 +387,10 @@ void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x16);
}
+void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 1, 0x16);
+}
+
void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x17);
}
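These rotate variants reuse the shift function codes and flag rotation through an otherwise-zero field: Rotrv/Drotrv set the sa field to 1 next to Srlv/Dsrlv's funct, and Drotr/Drotr32 set the rs field to 1 next to Dsrl/Dsrl32's. Assuming EmitR packs the standard MIPS R-type layout, the encoding looks like this sketch:

#include <cstdint>

// Standard MIPS R-type field packing (assumed behavior of EmitR).
uint32_t EncodeR(uint32_t op, uint32_t rs, uint32_t rt,
                 uint32_t rd, uint32_t shamt, uint32_t funct) {
  return (op << 26) | (rs << 21) | (rt << 16) |
         (rd << 11) | (shamt << 6) | funct;
}
// rotrv rd, rt, rs  ==  EncodeR(0, rs, rt, rd, /* rotate */ 1, 0x06)
// drotr rd, rt, sa  ==  EncodeR(0, /* rotate */ 1, rt, rd, sa, 0x3a)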
@@ -773,6 +789,10 @@ void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
+void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20);
+}
+
void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index d083eb4306..33f22d2c2d 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -123,15 +123,19 @@ class Mips64Assembler FINAL : public Assembler {
void Sra(GpuRegister rd, GpuRegister rt, int shamt);
void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+ void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Drotr(GpuRegister rd, GpuRegister rt, int shamt);
void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Drotr32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
+ void Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -230,6 +234,7 @@ class Mips64Assembler FINAL : public Assembler {
void Cvtdw(FpuRegister fd, FpuRegister fs);
void Cvtsd(FpuRegister fd, FpuRegister fs);
void Cvtds(FpuRegister fd, FpuRegister fs);
+ void Cvtsl(FpuRegister fd, FpuRegister fs);
void Cvtdl(FpuRegister fd, FpuRegister fs);
void Mfc1(GpuRegister rt, FpuRegister fs);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 2071aca546..16f29b00bc 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -215,6 +215,22 @@ TEST_F(AssemblerMIPS64Test, AbsD) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsD, "abs.d ${reg1}, ${reg2}"), "abs.d");
}
+TEST_F(AssemblerMIPS64Test, MovS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::MovS, "mov.s ${reg1}, ${reg2}"), "mov.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MovD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::MovD, "mov.d ${reg1}, ${reg2}"), "mov.d");
+}
+
+TEST_F(AssemblerMIPS64Test, NegS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::NegS, "neg.s ${reg1}, ${reg2}"), "neg.s");
+}
+
+TEST_F(AssemblerMIPS64Test, NegD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::NegD, "neg.d ${reg1}, ${reg2}"), "neg.d");
+}
+
TEST_F(AssemblerMIPS64Test, RoundLS) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLS, "round.l.s ${reg1}, ${reg2}"), "round.l.s");
}
@@ -307,6 +323,34 @@ TEST_F(AssemblerMIPS64Test, CvtDL) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
}
+TEST_F(AssemblerMIPS64Test, CvtDS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "cvt.d.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtDW) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "cvt.d.w");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSL) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "cvt.s.l");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "cvt.s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSW) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "cvt.s.w");
+}
+
+////////////////
+// CALL / JMP //
+////////////////
+
+TEST_F(AssemblerMIPS64Test, Jalr) {
+ DriverStr(RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
+}
+
//////////
// MISC //
//////////
@@ -319,6 +363,14 @@ TEST_F(AssemblerMIPS64Test, Dbitswap) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Dbitswap, "dbitswap ${reg1}, ${reg2}"), "dbitswap");
}
+TEST_F(AssemblerMIPS64Test, Seb) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Seb, "seb ${reg1}, ${reg2}"), "seb");
+}
+
+TEST_F(AssemblerMIPS64Test, Seh) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Seh, "seh ${reg1}, ${reg2}"), "seh");
+}
+
TEST_F(AssemblerMIPS64Test, Dsbh) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Dsbh, "dsbh ${reg1}, ${reg2}"), "dsbh");
}
@@ -331,6 +383,42 @@ TEST_F(AssemblerMIPS64Test, Wsbh) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
}
+TEST_F(AssemblerMIPS64Test, Sll) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "sll");
+}
+
+TEST_F(AssemblerMIPS64Test, Srl) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
+}
+
+TEST_F(AssemblerMIPS64Test, Sra) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+}
+
TEST_F(AssemblerMIPS64Test, Sc) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 04e815aa1d..5347bf0302 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2369,44 +2369,48 @@ void X86Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = Low32Bits(v);
int32_t v_high = High32Bits(v);
if (buffer_.size() > 1) {
// Ensure we don't pass the end of the buffer.
for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
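
Taken together, the hunks above change ConstantArea's offsets from int to size_t and factor the unconditional append out of the deduplicating add. A minimal standalone sketch of the logic these hunks converge on, with the class trimmed to the two methods (elem_size_ as in the patch):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class ConstantAreaSketch {
     public:
      size_t AppendInt32(int32_t v) {
        size_t result = buffer_.size() * elem_size_;  // Byte offset of the new slot.
        buffer_.push_back(v);
        return result;
      }

      size_t AddInt32(int32_t v) {
        for (size_t i = 0, e = buffer_.size(); i < e; i++) {
          if (v == buffer_[i]) {
            return i * elem_size_;  // Reuse an existing literal.
          }
        }
        return AppendInt32(v);  // Didn't match anything.
      }

     private:
      static constexpr size_t elem_size_ = sizeof(int32_t);
      std::vector<int32_t> buffer_;
    };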
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 93ecdf52fe..b50fda907a 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -166,6 +166,39 @@ class Address : public Operand {
Init(base_in, disp.Int32Value());
}
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale_in, index_in, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ Init(base_in, index_in, scale_in, disp);
+ }
+
+ Address(Register base_in,
+ Register index_in,
+ ScaleFactor scale_in,
+          int32_t disp, AssemblerFixup* fixup) {
+ Init(base_in, index_in, scale_in, disp);
+ SetFixup(fixup);
+ }
+
+ static Address Absolute(uintptr_t addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset<4> addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
void Init(Register base_in, int32_t disp) {
if (disp == 0 && base_in != EBP) {
SetModRM(0, base_in);
@@ -181,14 +214,7 @@ class Address : public Operand {
}
}
- Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
- CHECK_NE(index_in, ESP); // Illegal addressing mode.
- SetModRM(0, ESP);
- SetSIB(scale_in, index_in, EBP);
- SetDisp32(disp);
- }
-
- Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ void Init(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
CHECK_NE(index_in, ESP); // Illegal addressing mode.
if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
@@ -203,20 +229,6 @@ class Address : public Operand {
SetDisp32(disp);
}
}
-
- static Address Absolute(uintptr_t addr) {
- Address result;
- result.SetModRM(0, EBP);
- result.SetDisp32(addr);
- return result;
- }
-
- static Address Absolute(ThreadOffset<4> addr) {
- return Absolute(addr.Int32Value());
- }
-
- private:
- Address() {}
};
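
The constructor shuffle above is mechanical (the public constructors now funnel into private Init() helpers), but the encodings they emit lean on two x86 quirks worth spelling out: mod=00 with rm=100 (ESP) means a SIB byte follows, and a SIB base of 101 (EBP) under mod=00 means "no base register, 32-bit displacement". That is why the index-only constructor sets ModRM(0, ESP) and passes EBP as the SIB base. A small sketch of the byte layout (not ART's emitter):

    #include <cstdint>

    // ModRM: [mod:2][reg:3][rm:3]; SIB: [scale:2][index:3][base:3].
    uint8_t ModRM(uint8_t mod, uint8_t reg, uint8_t rm) {
      return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
    }

    uint8_t SIB(uint8_t scale, uint8_t index, uint8_t base) {
      return static_cast<uint8_t>((scale << 6) | (index << 3) | base);
    }

    // Index-only address, as in Address(index_in, scale_in, disp) above:
    // ModRM(0, opcode_reg, 0b100) + SIB(scale, index, 0b101) + 4-byte disp.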
@@ -252,40 +264,39 @@ class ConstantArea {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v);
+ size_t AddDouble(double v);
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v);
+ size_t AddFloat(float v);
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v);
+ size_t AddInt32(int32_t v);
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v);
+ size_t AddInt64(int64_t v);
bool IsEmpty() const {
return buffer_.size() == 0;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
-
- void AddFixup(AssemblerFixup* fixup) {
- fixups_.push_back(fixup);
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
}
- const std::vector<AssemblerFixup*>& GetFixups() const {
- return fixups_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
}
private:
- static constexpr size_t kEntrySize = sizeof(int32_t);
+ static constexpr size_t elem_size_ = sizeof(int32_t);
std::vector<int32_t> buffer_;
- std::vector<AssemblerFixup*> fixups_;
};
class X86Assembler FINAL : public Assembler {
@@ -740,26 +751,36 @@ class X86Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
- void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
private:
inline void EmitUint8(uint8_t value);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 6e7d74d528..9eb5e67041 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -3122,7 +3122,14 @@ void X86_64Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
+ // Look for an existing match.
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
return i * elem_size_;
@@ -3130,12 +3137,10 @@ int ConstantArea::AddInt32(int32_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = v;
int32_t v_high = v >> 32;
if (buffer_.size() > 1) {
@@ -3148,18 +3153,18 @@ int ConstantArea::AddInt64(int64_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 255f551675..01d28e305d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -269,36 +269,40 @@ class Address : public Operand {
* Class to handle constant area values.
*/
class ConstantArea {
- public:
- ConstantArea() {}
+ public:
+ ConstantArea() {}
- // Add a double to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddDouble(double v);
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddDouble(double v);
- // Add a float to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddFloat(float v);
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddFloat(float v);
- // Add an int32_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt32(int32_t v);
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt32(int32_t v);
- // Add an int64_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt64(int64_t v);
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
- int GetSize() const {
- return buffer_.size() * elem_size_;
- }
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt64(int64_t v);
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
+ }
- private:
- static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
+ }
+
+ private:
+ static constexpr size_t elem_size_ = sizeof(int32_t);
+ std::vector<int32_t> buffer_;
};
@@ -806,19 +810,27 @@ class X86_64Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
@@ -826,6 +838,9 @@ class X86_64Assembler FINAL : public Assembler {
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; }
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
+
//
// Heap poisoning.
//
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 680e2d7b45..384b8794c1 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -537,10 +537,18 @@ class Dex2Oat FINAL {
// the runtime.
LogCompletionTime();
- if (kIsDebugBuild || (RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- delete runtime_; // See field declaration for why this is manual.
- delete driver_;
- delete verification_results_;
+ if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+      // On non-debug builds we want to simply exit rather than bring the
+      // runtime down in an orderly fashion, so release the following fields.
+ driver_.release();
+ image_writer_.release();
+ for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files_) {
+ dex_file.release();
+ }
+ oat_file_.release();
+ runtime_.release();
+ verification_results_.release();
+ key_value_store_.release();
}
}
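
The inverted condition above turns the old manual-delete path into a deliberate leak: on release builds dex2oat is about to exit anyway, so skipping teardown saves shutdown time, while debug and memory-tool builds still destruct everything so leak checkers stay clean. A sketch of the unique_ptr::release() idiom this relies on (types are stand-ins):

    #include <memory>

    struct Runtime { ~Runtime() { /* expensive, orderly shutdown */ } };

    void MaybeLeakOnExit(std::unique_ptr<Runtime>& runtime, bool orderly) {
      if (!orderly) {
        // unique_ptr forgets the object; its destructor will not delete it,
        // and the process exit reclaims the memory wholesale.
        runtime.release();
      }
      // If 'orderly', the unique_ptr destructor still deletes the Runtime.
    }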
@@ -1241,9 +1249,9 @@ class Dex2Oat FINAL {
runtime_options.push_back(std::make_pair(runtime_args_[i], nullptr));
}
- verification_results_ = new VerificationResults(compiler_options_.get());
+ verification_results_.reset(new VerificationResults(compiler_options_.get()));
callbacks_.reset(new QuickCompilerCallbacks(
- verification_results_,
+ verification_results_.get(),
&method_inliner_map_,
image_ ?
CompilerCallbacks::CallbackMode::kCompileBootImage :
@@ -1401,7 +1409,7 @@ class Dex2Oat FINAL {
}
ScopedObjectAccess soa(self);
dex_caches_.push_back(soa.AddLocalReference<jobject>(
- class_linker->RegisterDexFile(*dex_file)));
+ class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
@@ -1468,24 +1476,24 @@ class Dex2Oat FINAL {
class_loader = class_linker->CreatePathClassLoader(self, class_path_files);
}
- driver_ = new CompilerDriver(compiler_options_.get(),
- verification_results_,
- &method_inliner_map_,
- compiler_kind_,
- instruction_set_,
- instruction_set_features_.get(),
- image_,
- image_classes_.release(),
- compiled_classes_.release(),
- nullptr,
- thread_count_,
- dump_stats_,
- dump_passes_,
- dump_cfg_file_name_,
- dump_cfg_append_,
- compiler_phases_timings_.get(),
- swap_fd_,
- profile_file_);
+ driver_.reset(new CompilerDriver(compiler_options_.get(),
+ verification_results_.get(),
+ &method_inliner_map_,
+ compiler_kind_,
+ instruction_set_,
+ instruction_set_features_.get(),
+ image_,
+ image_classes_.release(),
+ compiled_classes_.release(),
+ nullptr,
+ thread_count_,
+ dump_stats_,
+ dump_passes_,
+ dump_cfg_file_name_,
+ dump_cfg_append_,
+ compiler_phases_timings_.get(),
+ swap_fd_,
+ profile_file_));
driver_->CompileAll(class_loader, dex_files_, timings_);
}
@@ -1587,7 +1595,7 @@ class Dex2Oat FINAL {
oat_writer.reset(new OatWriter(dex_files_, image_file_location_oat_checksum,
image_file_location_oat_data_begin,
image_patch_delta,
- driver_,
+ driver_.get(),
image_writer_.get(),
timings_,
key_value_store_.get()));
@@ -1776,22 +1784,21 @@ class Dex2Oat FINAL {
LOG(ERROR) << "Failed to create runtime";
return false;
}
- Runtime* runtime = Runtime::Current();
- runtime->SetInstructionSet(instruction_set_);
+ runtime_.reset(Runtime::Current());
+ runtime_->SetInstructionSet(instruction_set_);
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
- if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(), type);
+ if (!runtime_->HasCalleeSaveMethod(type)) {
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
- runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
+ runtime_->GetClassLinker()->FixupDexCaches(runtime_->GetResolutionMethod());
// Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
// set up.
interpreter::UnstartedRuntime::Initialize();
- runtime->GetClassLinker()->RunRootClinits();
- runtime_ = runtime;
+ runtime_->GetClassLinker()->RunRootClinits();
return true;
}
@@ -1940,9 +1947,7 @@ class Dex2Oat FINAL {
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the compiler down
- // in an orderly fashion. The destructor takes care of deleting this.
- VerificationResults* verification_results_;
+ std::unique_ptr<VerificationResults> verification_results_;
DexFileToMethodInlinerMap method_inliner_map_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
@@ -1950,9 +1955,7 @@ class Dex2Oat FINAL {
// Ownership for the class path files.
std::vector<std::unique_ptr<const DexFile>> class_path_files_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the runtime down
- // in an orderly fashion. The destructor takes care of deleting this.
- Runtime* runtime_;
+ std::unique_ptr<Runtime> runtime_;
size_t thread_count_;
uint64_t start_ns_;
@@ -1981,16 +1984,14 @@ class Dex2Oat FINAL {
std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
bool image_;
- std::unique_ptr<ImageWriter> image_writer_;
bool is_host_;
std::string android_root_;
std::vector<const DexFile*> dex_files_;
std::vector<jobject> dex_caches_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
- // Not a unique_ptr as we want to just exit on non-debug builds, not bringing the driver down
- // in an orderly fashion. The destructor takes care of deleting this.
- CompilerDriver* driver_;
+ std::unique_ptr<ImageWriter> image_writer_;
+ std::unique_ptr<CompilerDriver> driver_;
std::vector<std::string> verbose_methods_;
bool dump_stats_;
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index faa2d2ded9..c2f23aa523 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -58,9 +58,10 @@ static const MipsInstruction gMipsInstructions[] = {
// 0, 1, movci
{ kRTypeMask, 2, "srl", "DTA", },
{ kRTypeMask, 3, "sra", "DTA", },
- { kRTypeMask, 4, "sllv", "DTS", },
- { kRTypeMask, 6, "srlv", "DTS", },
- { kRTypeMask, 7, "srav", "DTS", },
+ { kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
+ { kRTypeMask | (0x1f << 6), (1 << 6) | 6, "rotrv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 7, "srav", "DTS", },
{ kRTypeMask, 8, "jr", "S", },
{ kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", }, // rd = 31 is implicit.
{ kRTypeMask | (0x1f << 11), 9, "jr", "S", }, // rd = 0 is implicit.
@@ -74,9 +75,10 @@ static const MipsInstruction gMipsInstructions[] = {
{ kRTypeMask, 17, "mthi", "S", },
{ kRTypeMask, 18, "mflo", "D", },
{ kRTypeMask, 19, "mtlo", "S", },
- { kRTypeMask, 20, "dsllv", "DTS", },
- { kRTypeMask, 22, "dsrlv", "DTS", },
- { kRTypeMask, 23, "dsrav", "DTS", },
+ { kRTypeMask | (0x1f << 6), 20, "dsllv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 22, "dsrlv", "DTS", },
+ { kRTypeMask | (0x1f << 6), (1 << 6) | 22, "drotrv", "DTS", },
+ { kRTypeMask | (0x1f << 6), 23, "dsrav", "DTS", },
{ kRTypeMask | (0x1f << 6), 24, "mult", "ST", },
{ kRTypeMask | (0x1f << 6), 25, "multu", "ST", },
{ kRTypeMask | (0x1f << 6), 26, "div", "ST", },
@@ -99,13 +101,14 @@ static const MipsInstruction gMipsInstructions[] = {
{ kRTypeMask, 46, "dsub", "DST", },
{ kRTypeMask, 47, "dsubu", "DST", },
// TODO: tge[u], tlt[u], teq, tne
- { kRTypeMask, 56, "dsll", "DTA", },
- { kRTypeMask, 58, "dsrl", "DTA", },
- { kRTypeMask, 59, "dsra", "DTA", },
- { kRTypeMask, 60, "dsll32", "DTA", },
- { kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
- { kRTypeMask, 62, "dsrl32", "DTA", },
- { kRTypeMask, 63, "dsra32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 56, "dsll", "DTA", },
+ { kRTypeMask | (0x1f << 21), 58, "dsrl", "DTA", },
+ { kRTypeMask | (0x1f << 21), (1 << 21) | 58, "drotr", "DTA", },
+ { kRTypeMask | (0x1f << 21), 59, "dsra", "DTA", },
+ { kRTypeMask | (0x1f << 21), 60, "dsll32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 62, "dsrl32", "DTA", },
+ { kRTypeMask | (0x1f << 21), (1 << 21) | 62, "drotr32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 63, "dsra32", "DTA", },
// SPECIAL0
{ kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
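
The widened masks above fold the shamt field (bits 6..10) or the rs field (bits 21..25) into the match, so the rotate encodings (rotrv/drotrv set bit 6, drotr/drotr32 set bit 21) no longer alias the plain shifts, whose extra field must be zero. Matching is presumably a first-hit scan over mask/value pairs, roughly:

    #include <cstddef>
    #include <cstdint>

    struct Entry { uint32_t mask; uint32_t value; const char* name; };

    // An instruction matches the first entry whose masked bits equal the
    // entry's value; tighter masks must therefore be listed to win.
    const char* Lookup(const Entry* table, size_t n, uint32_t instruction) {
      for (size_t i = 0; i < n; ++i) {
        if ((instruction & table[i].mask) == table[i].value) {
          return table[i].name;
        }
      }
      return nullptr;  // Unknown encoding.
    }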
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f5f7748835..ea61b43627 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,7 +26,6 @@
#include <vector>
#include "arch/instruction_set_features.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
@@ -1420,8 +1419,10 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
Handle<mirror::DexCache> dex_cache(
- hs->NewHandle(Runtime::Current()->GetClassLinker()->RegisterDexFile(*dex_file)));
+ hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file,
+ runtime->GetLinearAlloc())));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1965,10 +1966,13 @@ class ImageDumper {
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
- ArtCode art_code(method);
+ OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+ reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
if (method->IsNative()) {
- DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ if (!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(quick_oat_code_begin)) {
+ DCHECK(method_header->GetNativeGcMap() == nullptr) << PrettyMethod(method);
+ DCHECK(method_header->GetMappingTable() == nullptr) << PrettyMethod(method);
+ }
bool first_occurrence;
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
@@ -1982,8 +1986,6 @@ class ImageDumper {
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
- DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1991,22 +1993,22 @@ class ImageDumper {
bool first_occurrence;
size_t gc_map_bytes = state->ComputeOatSize(
- art_code.GetNativeGcMap(image_pointer_size), &first_occurrence);
+ method_header->GetNativeGcMap(), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes = state->ComputeOatSize(
- art_code.GetMappingTable(image_pointer_size), &first_occurrence);
+ method_header->GetMappingTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes = 0u;
- if (!art_code.IsOptimized(image_pointer_size)) {
+ if (!method_header->IsOptimized()) {
      // Methods compiled with the optimizing compiler have no vmap table.
vmap_table_bytes = state->ComputeOatSize(
- art_code.GetVmapTable(image_pointer_size), &first_occurrence);
+ method_header->GetVmapTable(), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
@@ -2400,14 +2402,13 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
// Need to register dex files to get a working dex cache.
ScopedObjectAccess soa(self);
ClassLinker* class_linker = runtime->GetClassLinker();
- Runtime::Current()->GetOatFileManager().RegisterOatFile(
- std::unique_ptr<const OatFile>(oat_file));
+ runtime->GetOatFileManager().RegisterOatFile(std::unique_ptr<const OatFile>(oat_file));
std::vector<const DexFile*> class_path;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
- class_linker->RegisterDexFile(*dex_file);
+ class_linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc());
class_path.push_back(dex_file);
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8fe3fa2df1..9236ffb032 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,7 +19,6 @@ LOCAL_PATH := $(call my-dir)
include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
- art_code.cc \
art_field.cc \
art_method.cc \
atomic.cc.arm \
@@ -156,6 +155,7 @@ LIBART_COMMON_SRC_FILES := \
oat_file.cc \
oat_file_assistant.cc \
oat_file_manager.cc \
+ oat_quick_method_header.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index e676a09733..d6ba304bd1 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -39,7 +39,7 @@ class ArchTest : public CommonRuntimeTest {
runtime->SetInstructionSet(isa);
ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
runtime->SetCalleeSaveMethod(save_method, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = runtime->GetRuntimeMethodFrameInfo(save_method);
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index d5c7846951..9cbec1e5bc 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -16,9 +16,9 @@
#include "context_arm.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "thread-inl.h"
namespace art {
namespace arm {
@@ -37,23 +37,21 @@ void ArmContext::Reset() {
arg0_ = 0;
}
-void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode art_code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = art_code.GetQuickFrameInfo();
+void ArmContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
uint32_t core_regs = frame_info.CoreSpillMask();
DCHECK_EQ(0u, core_regs & (static_cast<uint32_t>(-1) << kNumberOfCoreRegisters));
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index ea31055e9d..2623ee9315 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -35,7 +35,7 @@ class ArmContext : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d09631bc71..631b784787 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -891,7 +891,110 @@ END art_quick_set64_instance
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+ // Fast path rosalloc allocation.
+ // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
+ // r2, r3, r12: free.
+ ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
+ // Load the class (r2)
+ ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // Check class status.
+ ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
+ cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+ // Add a fake dependence from the
+ // following access flag and size
+ // loads to the status load.
+ // This is to prevent those loads
+ // from being reordered above the
+ // status load and reading wrong
+ // values (an alternative is to use
+ // a load-acquire for the status).
+ eor r3, r3, r3
+ add r2, r2, r3
+ // Check access flags has
+ // kAccClassIsFinalizable
+ ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+ tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ // allocation stack has room.
+ // TODO: consider using ldrd.
+ ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ cmp r3, r12
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
+ cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
+ // local allocation
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ // Compute the rosalloc bracket index
+ // from the size.
+ // Align up the size by the rosalloc
+ // bracket quantum size and divide
+ // by the quantum size and subtract
+ // by 1. This code is a shorter but
+ // equivalent version.
+ sub r3, r3, #1
+ lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // Load the rosalloc run (r12)
+ add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
+ ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // Load the free list head (r3). This
+ // will be the return val.
+ ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
+ // and update the list head with the
+ // next pointer.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ // Store the class pointer in the
+ // header. This also overwrites the
+ // next pointer. The offsets are
+ // asserted to match.
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+ POISON_HEAP_REF r2
+ str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ // Push the new object onto the thread
+ // local allocation stack and
+ // increment the thread local
+ // allocation stack top.
+ ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
+ str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ // Decrement the size of the free list
+ ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ sub r1, #1
+ // TODO: consider combining this store
+ // and the list head store above using
+ // strd.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+                                                             // (Alternatively we could use "ishst"
+                                                             // if we use load-acquire for the
+                                                             // class status load.)
+ dmb ish
+ mov r0, r3 // Set the return value and return.
+ bx lr
+
+.Lart_quick_alloc_object_rosalloc_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_rosalloc
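
For reference, the fast path above reads naturally as C++: bail out to the slow path unless the class is resolved, initialized, and non-finalizable; make sure the thread-local allocation stack has room; map the object size to a RosAlloc bracket; then pop the head of that run's free list. A rough, self-contained rendering (all layouts and constant values below are illustrative stand-ins for the generated asm_support offsets, not ART's definitions):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Illustrative layouts; the real offsets come from asm_support headers.
    struct Slot { Slot* next; };
    struct FreeList { Slot* head; size_t size; };
    struct Run { FreeList free_list; };
    struct Class { int32_t status; uint32_t access_flags; size_t object_size; };
    struct Thread {
      Slot** tl_alloc_stack_top;
      Slot** tl_alloc_stack_end;
      Run* rosalloc_runs[16];  // Bracket count is a placeholder.
    };

    constexpr int32_t kStatusInitialized = 10;                // Placeholder value.
    constexpr uint32_t kAccClassIsFinalizable = 0x80000000u;  // Placeholder value.
    constexpr size_t kMaxThreadLocalBracketSize = 128;        // Placeholder value.
    constexpr size_t kBracketQuantumSizeShift = 4;            // Placeholder value.

    // Returns nullptr to signal "take the slow path".
    void* AllocObjectRosAllocFastPath(Class* klass, Thread* self) {
      if (klass == nullptr || klass->status != kStatusInitialized) return nullptr;
      if ((klass->access_flags & kAccClassIsFinalizable) != 0) return nullptr;
      if (self->tl_alloc_stack_top >= self->tl_alloc_stack_end) return nullptr;
      size_t size = klass->object_size;
      if (size >= kMaxThreadLocalBracketSize) return nullptr;
      // Same shortcut as the asm: align up by the bracket quantum and divide,
      // minus one, computed as "subtract 1, then shift right".
      size_t bracket = (size - 1) >> kBracketQuantumSizeShift;
      Run* run = self->rosalloc_runs[bracket];
      Slot* slot = run->free_list.head;
      if (slot == nullptr) return nullptr;
      run->free_list.head = slot->next;          // Pop the free-list head.
      *reinterpret_cast<Class**>(slot) = klass;  // Class pointer overwrites 'next'.
      *self->tl_alloc_stack_top++ = slot;        // Push onto the TL alloc stack.
      run->free_list.size--;
      std::atomic_thread_fence(std::memory_order_seq_cst);  // The 'dmb ish' analogue.
      return slot;
    }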
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index cdc03fe16f..d5d1ec7f07 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -18,9 +18,9 @@
#include "context_arm64.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "thread-inl.h"
namespace art {
namespace arm64 {
@@ -39,21 +39,19 @@ void Arm64Context::Reset() {
arg0_ = 0;
}
-void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void Arm64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 11314e08ed..105e78461d 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -35,7 +35,7 @@ class Arm64Context : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index 9af7c04f5c..a50064851b 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -25,7 +25,7 @@
namespace art {
-class StackVisitor;
+class QuickMethodFrameInfo;
// Representation of a thread's context on the executing machine, used to implement long jumps in
// the quick stack frame layout.
@@ -39,10 +39,18 @@ class Context {
// Re-initializes the registers for context re-use.
virtual void Reset() = 0;
+ static uintptr_t* CalleeSaveAddress(uint8_t* frame, int num, size_t frame_size) {
+ // Callee saves are held at the top of the frame
+ uint8_t* save_addr = frame + frame_size - ((num + 1) * sizeof(void*));
+#if defined(__i386__) || defined(__x86_64__)
+ save_addr -= sizeof(void*); // account for return address
+#endif
+ return reinterpret_cast<uintptr_t*>(save_addr);
+ }
+
// Reads values from callee saves in the given frame. The frame also holds
// the method that holds the layout.
- virtual void FillCalleeSaves(const StackVisitor& fr)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) = 0;
// Sets the stack pointer value.
virtual void SetSP(uintptr_t new_sp) = 0;
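
The new static helper centralizes the arithmetic the per-architecture FillCalleeSaves() implementations now share: spills live at the top of the frame, highest-numbered slot first, and x86/x86-64 frames additionally skip the pushed return address. A worked example under an assumed 96-byte frame on a 64-bit target:

    #include <cassert>
    #include <cstdint>

    uintptr_t* CalleeSaveAddressSketch(uint8_t* frame, int num, size_t frame_size,
                                       bool has_return_address_slot) {
      uint8_t* save_addr = frame + frame_size - ((num + 1) * sizeof(void*));
      if (has_return_address_slot) {
        save_addr -= sizeof(void*);  // x86/x86-64: step past the return PC.
      }
      return reinterpret_cast<uintptr_t*>(save_addr);
    }

    int main() {
      uint8_t frame[96];
      // Spill 0 on a non-x86 64-bit target: bytes [88, 96) of the frame.
      assert(CalleeSaveAddressSketch(frame, 0, sizeof(frame), false) ==
             reinterpret_cast<uintptr_t*>(frame + 88));
      // Same spill on x86-64: one slot lower, at bytes [80, 88).
      assert(CalleeSaveAddressSketch(frame, 0, sizeof(frame), true) ==
             reinterpret_cast<uintptr_t*>(frame + 80));
      return 0;
    }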
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 9cfd2eb2d6..ff9c0b320d 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -107,6 +107,22 @@ static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
}
}
+static inline bool IsValidInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ case kMips:
+ case kMips64:
+ return true;
+ case kNone:
+ default:
+ return false;
+ }
+}
+
size_t GetInstructionSetAlignment(InstructionSet isa);
static inline bool Is64BitInstructionSet(InstructionSet isa) {
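
A plausible call-site shape for the new validity check, assuming ART's existing GetInstructionSetFromString() helper (this pairing is an illustration, not taken from the patch):

    // Illustrative only: validate an ISA parsed from a command-line flag.
    bool ParseIsaFlag(const char* name, InstructionSet* out) {
      InstructionSet isa = GetInstructionSetFromString(name);
      if (!IsValidInstructionSet(isa)) {
        return false;  // kNone or an unrecognized name.
      }
      *out = isa;
      return true;
    }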
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index dba62d9200..4dedb3339e 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -16,7 +16,6 @@
#include "context_mips.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
@@ -37,21 +36,19 @@ void MipsContext::Reset() {
arg0_ = 0;
}
-void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index 0affe5397a..f1e2905592 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -34,7 +34,7 @@ class MipsContext : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index d808c9e0dc..bd1ac3b0a7 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -16,7 +16,6 @@
#include "context_mips64.h"
-#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
@@ -37,21 +36,19 @@ void Mips64Context::Reset() {
arg0_ = 0;
}
-void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void Mips64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
// FP registers come second, from the highest down to the lowest.
for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
- fprs_[fp_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
index 84b1c9bad4..89fbf8ffc3 100644
--- a/runtime/arch/mips64/context_mips64.h
+++ b/runtime/arch/mips64/context_mips64.h
@@ -34,7 +34,7 @@ class Mips64Context : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(SP, new_sp);
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 68156ae7e3..66c8aadf33 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1615,5 +1615,70 @@ ENTRY art_quick_deoptimize_from_compiled_code
move $a0, rSELF # pass Thread::current
END art_quick_deoptimize_from_compiled_code
-UNIMPLEMENTED art_quick_indexof
-UNIMPLEMENTED art_quick_string_compareto
+ .set push
+ .set noat
+/* java.lang.String.compareTo(String anotherString) */
+ENTRY_NO_GP art_quick_string_compareto
+/* $a0 holds address of "this" */
+/* $a1 holds address of "anotherString" */
+ beq $a0,$a1,9f # this and anotherString are the same object
+ move $v0,$zero
+
+ lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+ sltu $at,$a2,$a3
+ seleqz $t2,$a3,$at
+ selnez $at,$a2,$at
+ or $t2,$t2,$at # $t2 now holds min(this.length(),anotherString.length())
+
+ beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0
+ subu $v0,$a2,$a3 # if $t2==0 return
+ # (this.length() - anotherString.length())
+1:
+ lhu $t0,MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1,MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0,$t1,9f # if this.charAt(i) != anotherString.charAt(i)
+ subu $v0,$t0,$t1 # return (this.charAt(i) - anotherString.charAt(i))
+ daddiu $a0,$a0,2 # point at this.charAt(i++)
+ subu $t2,$t2,1 # new value of
+ # min(this.length(),anotherString.length())-i
+ bnez $t2,1b
+ daddiu $a1,$a1,2 # point at anotherString.charAt(i++)
+ subu $v0,$a2,$a3
+
+9:
+ j $ra
+ nop
+END art_quick_string_compareto
+
+/* java.lang.String.indexOf(int ch, int fromIndex=0) */
+ENTRY_NO_GP art_quick_indexof
+/* $a0 holds address of "this" */
+/* $a1 holds "ch", the character to search for (a value, not an address) */
+/* $a2 holds "fromIndex", the starting index (a value, not an address) */
+ lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ subu $t0,$t0,$a2 # this.length() - offset
+ blez $t0,6f # if this.length()-offset <= 0
+ li $v0,-1 # return -1;
+
+ sll $v0,$a2,1 # $a0 += $a2 * 2
+ daddu $a0,$a0,$v0 # " " " " "
+ move $v0,$a2 # Set i to offset.
+
+1:
+ lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3,$a1,6f # return i;
+ daddu $a0,$a0,2 # i++
+ subu $t0,$t0,1 # this.length() - i
+ bnez $t0,1b # while this.length() - i > 0
+ addu $v0,$v0,1 # i++
+
+ li $v0,-1 # if this.length() - i <= 0
+ # return -1;
+
+6:
+ j $ra
+ nop
+END art_quick_indexof
+
+ .set pop
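
Both routines are straight transliterations of the Java semantics over the uncompressed UTF-16 payload at MIRROR_STRING_VALUE_OFFSET: compareTo walks the shared prefix and falls back to the length difference, while indexOf scans forward from fromIndex. A C++ restatement of the contract the assembly implements (assuming a sanitized, non-negative fromIndex, as the callers guarantee):

    #include <cstdint>

    int32_t StringCompareTo(const uint16_t* a, int32_t a_len,
                            const uint16_t* b, int32_t b_len) {
      int32_t min_len = a_len < b_len ? a_len : b_len;
      for (int32_t i = 0; i < min_len; ++i) {
        if (a[i] != b[i]) {
          return a[i] - b[i];  // First differing character decides.
        }
      }
      return a_len - b_len;  // Shared prefix: lengths decide.
    }

    int32_t StringIndexOf(const uint16_t* chars, int32_t len,
                          uint16_t ch, int32_t from_index) {
      for (int32_t i = from_index; i < len; ++i) {
        if (chars[i] == ch) {
          return i;
        }
      }
      return -1;  // Not found.
    }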
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index ef5edbb227..fbacdbc930 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -113,7 +113,8 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMal
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 0d88dd0dc5..077d2db870 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,10 +16,8 @@
#include "context_x86.h"
-#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
namespace art {
namespace x86 {
@@ -37,9 +35,7 @@ void X86Context::Reset() {
arg0_ = 0;
}
-void X86Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void X86Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
@@ -47,7 +43,7 @@ void X86Context::FillCalleeSaves(const StackVisitor& fr) {
frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs)); // Return address spill.
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);
@@ -58,9 +54,9 @@ void X86Context::FillCalleeSaves(const StackVisitor& fr) {
for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
// Two void* per XMM register.
fprs_[2 * fp_reg] = reinterpret_cast<uint32_t*>(
- fr.CalleeSaveAddress(spill_pos + 1, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos + 1, frame_info.FrameSizeInBytes()));
fprs_[2 * fp_reg + 1] = reinterpret_cast<uint32_t*>(
- fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes()));
spill_pos += 2;
}
DCHECK_EQ(spill_pos,
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index 59beb12ffa..f482d9ffcb 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -34,7 +34,7 @@ class X86Context : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(ESP, new_sp);
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4a106e44c6..2f485ae644 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -788,6 +788,7 @@ END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 12c94bc598..7c49e9c2b2 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,10 +16,8 @@
#include "context_x86_64.h"
-#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
-#include "stack.h"
namespace art {
namespace x86_64 {
@@ -37,9 +35,7 @@ void X86_64Context::Reset() {
arg0_ = 0;
}
-void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtCode code = fr.GetCurrentCode();
- const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
+void X86_64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
@@ -47,7 +43,7 @@ void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
frame_info.CoreSpillMask() & ~(static_cast<uint32_t>(-1) << kNumberOfCpuRegisters);
DCHECK_EQ(1, POPCOUNT(frame_info.CoreSpillMask() & ~core_regs)); // Return address spill.
for (uint32_t core_reg : HighToLowBits(core_regs)) {
- gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
+ gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
++spill_pos;
}
DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) - 1);
@@ -57,7 +53,7 @@ void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
DCHECK_EQ(0u, fp_regs & (static_cast<uint32_t>(-1) << kNumberOfFloatRegisters));
for (uint32_t fp_reg : HighToLowBits(fp_regs)) {
fprs_[fp_reg] = reinterpret_cast<uint64_t*>(
- fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes()));
+ CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes()));
++spill_pos;
}
DCHECK_EQ(spill_pos,
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index f05b7f093f..46f2b63848 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -34,7 +34,7 @@ class X86_64Context : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) OVERRIDE;
void SetSP(uintptr_t new_sp) OVERRIDE {
SetGPR(RSP, new_sp);
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5c413d20f8..95f0ccb419 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -809,6 +809,7 @@ END_MACRO
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
diff --git a/runtime/art_code.cc b/runtime/art_code.cc
deleted file mode 100644
index ad0b170079..0000000000
--- a/runtime/art_code.cc
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_code.h"
-
-#include "art_method.h"
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "handle_scope.h"
-#include "jit/jit.h"
-#include "jit/jit_code_cache.h"
-#include "mapping_table.h"
-#include "oat.h"
-#include "runtime.h"
-#include "utils.h"
-
-namespace art {
-
- // Converts a dex PC to a native PC.
-uintptr_t ArtCode::ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- if (IsOptimized(sizeof(void*))) {
- // Optimized code does not have a mapping table. Search for the dex-to-pc
- // mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
-
- // All stack maps are stored in the same CodeItem section, safepoint stack
- // maps first, then catch stack maps. We use `is_for_catch_handler` to select
- // the order of iteration.
- StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
- if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
- }
- } else {
- MappingTable table((entry_point != nullptr) ? GetMappingTable(sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- }
-
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(method_);
- }
- return UINTPTR_MAX;
-}
-
-bool ArtCode::IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
- // Temporary solution for detecting if a method has been optimized: the compiler
- // does not create a GC map. Instead, the vmap table contains the stack map
- // (as in stack_map.h).
- return !method_->IsNative()
- && method_->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
- && GetQuickOatEntryPoint(pointer_size) != nullptr
- && GetNativeGcMap(pointer_size) == nullptr;
-}
-
-CodeInfo ArtCode::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized(sizeof(void*)));
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(sizeof(void*)));
- DCHECK(code_pointer != nullptr);
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- const void* data =
- reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- return CodeInfo(data);
-}
-
-uintptr_t ArtCode::NativeQuickPcOffset(const uintptr_t pc) {
- const void* quick_entry_point = GetQuickOatEntryPoint(sizeof(void*));
- CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point,
- Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*)));
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-
-uint32_t ArtCode::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
- if (IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
- if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ? GetMappingTable(sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
- // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
- DCHECK(method_->IsNative() || method_->IsCalleeSaveMethod() || method_->IsProxyMethod())
- << PrettyMethod(method_);
- return DexFile::kDexNoIndex; // Special no mapping case
- }
- // Assume the caller wants a pc-to-dex mapping so check here first.
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- // Now check dex-to-pc mappings.
- typedef MappingTable::DexToPcIterator It2;
- for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- }
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
-               << " (PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
- << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
- << ") in " << PrettyMethod(method_);
- }
- return DexFile::kDexNoIndex;
-}
-
-const uint8_t* ArtCode::GetNativeGcMap(size_t pointer_size) {
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-const uint8_t* ArtCode::GetVmapTable(size_t pointer_size) {
- CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-const uint8_t* ArtCode::GetMappingTable(size_t pointer_size) {
- const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- if (code_pointer == nullptr) {
- return nullptr;
- }
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: This does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t shorty_len;
- const char* shorty = method->GetShorty(&shorty_len);
- uint32_t refs = 0;
-  for (uint32_t i = 1; i < shorty_len; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- return refs;
-}
-
-QuickMethodFrameInfo ArtCode::GetQuickFrameInfo() {
- Runtime* runtime = Runtime::Current();
-
- if (UNLIKELY(method_->IsAbstract())) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
-
- // This goes before IsProxyMethod since runtime methods have a null declaring class.
- if (UNLIKELY(method_->IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(method_);
- }
-
-  // For proxy methods we add special handling for the direct method case (there is
-  // only one direct method: the constructor). The direct method is cloned from the
-  // original java.lang.reflect.Proxy class together with its code, so it executes as a
-  // regular quick-compiled method without any stubs. Its frame info should therefore
-  // be returned as for a quick method, not a stub. However, if instrumentation stubs
-  // are installed, instrumentation->GetQuickCodeFor() returns artQuickProxyInvokeHandler
-  // instead of an oat code pointer, so we have to add a special case here.
- if (UNLIKELY(method_->IsProxyMethod())) {
- if (method_->IsDirect()) {
- CHECK(method_->IsConstructor());
- const void* code_pointer =
- EntryPointToCodePointer(method_->GetEntryPointFromQuickCompiledCode());
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
- }
-
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*));
- ClassLinker* class_linker = runtime->GetClassLinker();
-  // On failure, instead of null we get the quick-generic-jni-trampoline for a native method
-  // (indicating generic JNI), or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods. We really shouldn't see a failure for non-native methods here.
- DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
-
- if (class_linker->IsQuickGenericJniStub(entry_point)) {
- // Generic JNI frame.
- DCHECK(method_->IsNative());
- uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method_) + 1;
- size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
- // Callee saves + handle scope + method ref + alignment
- // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
- sizeof(ArtMethod*) + scope_size, kStackAlignment);
- return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
- }
-
- const void* code_pointer = EntryPointToCodePointer(entry_point);
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
-}
-
-void ArtCode::AssertPcIsWithinQuickCode(uintptr_t pc) {
- if (method_->IsNative() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
- return;
- }
- if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
- return;
- }
- const void* code = method_->GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
- return;
- }
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickResolutionStub(code)) {
- return;
- }
-  // If we are using the JIT, it may have just compiled the method after the
-  // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
- return;
- }
-
- uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
- EntryPointToCodePointer(code))[-1].code_size_;
- uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
- CHECK(code_start <= pc && pc <= (code_start + code_size))
- << PrettyMethod(method_)
- << " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << code_size;
-}
-
-bool ArtCode::PcIsWithinQuickCode(uintptr_t pc) {
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
- method_->GetEntryPointFromQuickCompiledCode()));
- if (code == 0) {
- return pc == 0;
- }
- uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
- return code <= pc && pc <= (code + code_size);
-}
-
-const void* ArtCode::GetQuickOatEntryPoint(size_t pointer_size) {
- if (method_->IsAbstract() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(method_, pointer_size);
-  // On failure, instead of null we get the quick-generic-jni-trampoline for a native method
-  // (indicating generic JNI), or the quick-to-interpreter-bridge (but not the trampoline)
-  // for non-native methods.
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickGenericJniStub(code)) {
- return nullptr;
- }
- return code;
-}
-
-} // namespace art
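The deleted helpers above all recover the method header by indexing the code pointer with [-1]. A minimal standalone sketch of that layout idiom, using a hypothetical FakeHeader in place of the real OatQuickMethodHeader fields:

#include <cstdint>

// Sketch only: the header is emitted immediately before the method's first
// instruction, so a header pointer derived from the code pointer reaches it
// via [-1] (equivalently, by subtracting sizeof(header)).
struct FakeHeader {  // Hypothetical stand-in for OatQuickMethodHeader.
  uint32_t mapping_table_offset_;
  uint32_t vmap_table_offset_;
  uint32_t gc_map_offset_;
  uint32_t code_size_;
};

inline const FakeHeader& HeaderOf(const void* code_pointer) {
  return reinterpret_cast<const FakeHeader*>(code_pointer)[-1];
}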
diff --git a/runtime/art_code.h b/runtime/art_code.h
deleted file mode 100644
index 1d2d898ed6..0000000000
--- a/runtime/art_code.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ART_CODE_H_
-#define ART_RUNTIME_ART_CODE_H_
-
-#include "base/mutex.h"
-#include "offsets.h"
-#include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
-
-namespace art {
-
-class ArtMethod;
-
-class ArtCode FINAL {
- public:
- explicit ArtCode(ArtMethod** method) : method_(*method) {}
- explicit ArtCode(ArtMethod* method) : method_(method) {}
- ArtCode() : method_(nullptr) {}
-
- // Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
- const uint8_t* GetNativeGcMap(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetVmapTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetMappingTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- return FrameOffset(GetFrameSizeInBytes() - sizeof(void*));
- }
-
- template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
- if (kCheckFrameSize) {
- DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
- }
- return result;
- }
-
- const void* GetQuickOatEntryPoint(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
-
- void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- bool PcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
- DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
- return FrameOffset(handle_scope_offset);
- }
-
- ArtMethod* GetMethod() const { return method_; }
-
- private:
- ArtMethod* method_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ART_CODE_H_
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f9d9077261..f5befdfc07 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -384,4 +384,28 @@ const uint8_t* ArtMethod::GetQuickenedInfo() {
return oat_method.GetVmapTable();
}
+const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
+ if (IsRuntimeMethod() || IsProxyMethod()) {
+ return nullptr;
+ }
+
+ Runtime* runtime = Runtime::Current();
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
+ DCHECK(code != nullptr);
+
+ if (runtime->GetClassLinker()->IsQuickGenericJniStub(code)) {
+ // The generic JNI does not have any method header.
+ return nullptr;
+ }
+
+ code = EntryPointToCodePointer(code);
+ OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+ reinterpret_cast<uintptr_t>(code) - sizeof(OatQuickMethodHeader));
+
+ // TODO(ngeoffray): validate the pc. Note that unit tests can give unrelated pcs (for
+ // example arch_test).
+ UNUSED(pc);
+ return method_header;
+}
+
} // namespace art
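A hedged usage sketch of the new accessor, combined with the OatQuickMethodHeader queries that later hunks in this patch migrate to. DexPcForReturnAddress is a hypothetical helper, not part of the patch:

// Resolve the dex PC for a return address; only the ART APIs shown in this patch are used.
uint32_t DexPcForReturnAddress(ArtMethod* method, uintptr_t caller_pc)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  const OatQuickMethodHeader* header = method->GetOatQuickMethodHeader(caller_pc);
  if (header == nullptr) {
    return DexFile::kDexNoIndex;  // Runtime/proxy method or generic JNI stub.
  }
  if (header->IsOptimized()) {
    CodeInfo code_info = header->GetOptimizedCodeInfo();
    StackMapEncoding encoding = code_info.ExtractEncoding();
    StackMap map = code_info.GetStackMapForNativePcOffset(
        header->NativeQuickPcOffset(caller_pc), encoding);
    return map.IsValid() ? map.GetDexPc(encoding) : DexFile::kDexNoIndex;
  }
  return header->ToDexPc(method, caller_pc);  // Quick mapping-table path.
}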
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9743250cc0..9f1495cf39 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -32,6 +32,7 @@
namespace art {
union JValue;
+class OatQuickMethodHeader;
class ProfilingInfo;
class ScopedObjectAccessAlreadyRunnable;
class StringPiece;
@@ -434,6 +435,11 @@ class ArtMethod FINAL {
const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+  // Returns the method header for the compiled code containing 'pc'. Note that runtime
+  // and proxy methods return null here, as they are not oat based.
+ const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index d98fc5179f..69f6fe96ff 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,6 +19,7 @@
#if defined(__cplusplus)
#include "art_method.h"
+#include "gc/allocator/rosalloc.h"
#include "lock_word.h"
#include "mirror/class.h"
#include "mirror/string.h"
@@ -53,6 +54,14 @@ static inline void CheckAsmSupportOffsetsAndSizes() {
#define ADD_TEST_EQ(x, y)
#endif
+#if defined(__LP64__)
+#define POINTER_SIZE_SHIFT 3
+#else
+#define POINTER_SIZE_SHIFT 2
+#endif
+ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
+ static_cast<size_t>(__SIZEOF_POINTER__))
+
// Size of references to the heap on the stack.
#define STACK_REFERENCE_SIZE 4
ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
@@ -62,6 +71,10 @@ ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReferenc
ADD_TEST_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE),
sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+#define COMPRESSED_REFERENCE_SIZE_SHIFT 2
+ADD_TEST_EQ(static_cast<size_t>(1U << COMPRESSED_REFERENCE_SIZE_SHIFT),
+ static_cast<size_t>(COMPRESSED_REFERENCE_SIZE))
+
// Note: these callee save method loads require read barriers.
// Offset of field Runtime::callee_save_methods_[kSaveAll]
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
@@ -120,6 +133,18 @@ ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.rosalloc_runs.
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
+ art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
+#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 34 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
+ art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
+#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 35 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
+ art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
@@ -236,6 +261,44 @@ ADD_TEST_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), art::kObjectAlignment -
ADD_TEST_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED),
~static_cast<uint32_t>(art::kObjectAlignment - 1))
+#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
+ADD_TEST_EQ(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 4
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSizeShift))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 15
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff0
+ADD_TEST_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32),
+ ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff0
+ADD_TEST_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64),
+ ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))
+
+#define ROSALLOC_SLOT_NEXT_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset()))
+// Assert this so that we can avoid zeroing the next field by installing the class pointer.
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
+
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
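The new ROSALLOC_* constants let hand-written assembly mirror the C++ allocator's bracket math. A hedged C sketch of the size-class computation the constants encode; the real fast paths are per-architecture assembly, and the indexing scheme here is an assumption:

static inline bool FitsThreadLocalRun(uint32_t size) {
  return size <= ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE;  // 128 bytes.
}
static inline uint32_t RoundToBracketQuantum(uint32_t size) {
  // Round up to the 16-byte quantum using the mask and its toggled form.
  return (size + ROSALLOC_BRACKET_QUANTUM_SIZE_MASK) &
         ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32;
}
static inline uint32_t BracketIndex(uint32_t rounded_size) {
  return (rounded_size >> ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT) - 1;  // Assumed indexing.
}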
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f2fc074fb..f1d0a5fbff 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -49,15 +49,13 @@ class NoopAllocator FINAL : public Allocator {
explicit NoopAllocator() {}
~NoopAllocator() {}
- void* Alloc(size_t size) {
- UNUSED(size);
+ void* Alloc(size_t size ATTRIBUTE_UNUSED) {
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
UNREACHABLE();
}
- void Free(void* p) {
+ void Free(void* p ATTRIBUTE_UNUSED) {
// Noop.
- UNUSED(p);
}
private:
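This hunk begins a cleanup that recurs throughout the rest of the patch: body-level UNUSED(x) calls become ATTRIBUTE_UNUSED annotations on the parameter itself (in ART's base/macros.h, ATTRIBUTE_UNUSED wraps __attribute__((unused))). Both forms silence -Wunused-parameter; the attribute form keeps the body empty. A minimal before/after sketch:

// Before: suppress the warning inside the body.
void Free(void* p) {
  UNUSED(p);
}

// After: annotate the parameter; the body stays empty.
void Free(void* p ATTRIBUTE_UNUSED) {}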
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 3422625282..f9960acfb8 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -115,9 +115,7 @@ class TrackingAllocatorImpl : public std::allocator<T> {
// Used internally by STL data structures.
template <class U>
- TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) noexcept {
- UNUSED(alloc);
- }
+ TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {}
// Used internally by STL data structures.
TrackingAllocatorImpl() noexcept {
@@ -131,8 +129,7 @@ class TrackingAllocatorImpl : public std::allocator<T> {
typedef TrackingAllocatorImpl<U, kTag> other;
};
- pointer allocate(size_type n, const_pointer hint = 0) {
- UNUSED(hint);
+ pointer allocate(size_type n, const_pointer hint ATTRIBUTE_UNUSED = 0) {
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 004895ab42..4e9282f9a1 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -119,13 +119,13 @@ class ArenaAllocatorStatsImpl<false> {
ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
- void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
+ void Copy(const ArenaAllocatorStatsImpl& other ATTRIBUTE_UNUSED) {}
+ void RecordAlloc(size_t bytes ATTRIBUTE_UNUSED, ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
- void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
- UNUSED(os); UNUSED(first); UNUSED(lost_bytes_adjustment);
- }
+ void Dump(std::ostream& os ATTRIBUTE_UNUSED,
+ const Arena* first ATTRIBUTE_UNUSED,
+ ssize_t lost_bytes_adjustment ATTRIBUTE_UNUSED) const {}
};
template <bool kCount>
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 9174d2d6d9..e2d4c2411e 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -176,8 +176,8 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind {
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index 03f457534e..e19aecb712 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -54,7 +54,7 @@ class DebugStackRefCounterImpl<false> {
template <>
class DebugStackReferenceImpl<false> {
public:
- explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter) { UNUSED(counter); }
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter ATTRIBUTE_UNUSED) {}
DebugStackReferenceImpl(const DebugStackReferenceImpl& other) = default;
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) = default;
void CheckTop() { }
@@ -63,7 +63,7 @@ class DebugStackReferenceImpl<false> {
template <>
class DebugStackIndirectTopRefImpl<false> {
public:
- explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref) { UNUSED(ref); }
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref ATTRIBUTE_UNUSED) {}
DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other) = default;
DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) = default;
void CheckTop() { }
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f2b1cc06d7..4819f06bb4 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -127,8 +127,8 @@ class HashSet {
using size_type = size_t;
using difference_type = ptrdiff_t;
- static constexpr double kDefaultMinLoadFactor = 0.5;
- static constexpr double kDefaultMaxLoadFactor = 0.9;
+ static constexpr double kDefaultMinLoadFactor = 0.4;
+ static constexpr double kDefaultMaxLoadFactor = 0.7;
static constexpr size_t kMinBuckets = 1000;
// If we don't own the data, this will create a new array which owns the data.
@@ -138,14 +138,18 @@ class HashSet {
elements_until_expand_ = 0;
}
- HashSet()
+ HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
+
+ HashSet(double min_load_factor, double max_load_factor)
: num_elements_(0u),
num_buckets_(0u),
elements_until_expand_(0u),
owns_data_(false),
data_(nullptr),
- min_load_factor_(kDefaultMinLoadFactor),
- max_load_factor_(kDefaultMaxLoadFactor) {
+ min_load_factor_(min_load_factor),
+ max_load_factor_(max_load_factor) {
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
}
explicit HashSet(const allocator_type& alloc)
@@ -459,6 +463,31 @@ class HashSet {
return errors;
}
+ double GetMinLoadFactor() const {
+ return min_load_factor_;
+ }
+
+ double GetMaxLoadFactor() const {
+ return max_load_factor_;
+ }
+
+ // Change the load factor of the hash set. If the current load factor is greater than the max
+ // specified, then we resize the hash table storage.
+ void SetLoadFactor(double min_load_factor, double max_load_factor) {
+ DCHECK_LT(min_load_factor, max_load_factor);
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
+ min_load_factor_ = min_load_factor;
+ max_load_factor_ = max_load_factor;
+ elements_until_expand_ = NumBuckets() * max_load_factor_;
+ // If the current load factor isn't in the range, then resize to the mean of the minimum and
+ // maximum load factor.
+ const double load_factor = CalculateLoadFactor();
+ if (load_factor > max_load_factor_) {
+ Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ }
+ }
+
private:
T& ElementForIndex(size_t index) {
DCHECK_LT(index, NumBuckets());
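A worked example of the SetLoadFactor() resize target above, with illustrative numbers:

// Suppose Size() == 900 and the current load factor is 0.9.
// SetLoadFactor(0.1, 0.3) picks the mean target (0.1 + 0.3) * 0.5 = 0.2 and
// calls Resize(900 / 0.2) == Resize(4500), landing the table at roughly 0.2,
// inside the requested [0.1, 0.3] range.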
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 6d2c5e0f2c..743e98ed84 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -196,6 +196,24 @@ TEST_F(HashSetTest, TestShrink) {
}
}
+TEST_F(HashSetTest, TestLoadFactor) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ static constexpr size_t kStringCount = 1000;
+ static constexpr double kEpsilon = 0.01;
+ for (size_t i = 0; i < kStringCount; ++i) {
+ hash_set.Insert(RandomString(i % 10 + 1));
+ }
+ // Check that changing the load factor resizes the table to be within the target range.
+ EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.1, 0.3);
+ EXPECT_DOUBLE_EQ(0.1, hash_set.GetMinLoadFactor());
+ EXPECT_DOUBLE_EQ(0.3, hash_set.GetMaxLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.6, 0.8);
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+}
+
TEST_F(HashSetTest, TestStress) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::unordered_multiset<std::string> std_set;
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 4f513707bb..2554fb0754 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -132,7 +132,7 @@ class ScopedArenaAllocator
ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Allow a delete-expression to destroy but not deallocate allocators created by Create().
- static void operator delete(void* ptr) { UNUSED(ptr); }
+ static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
ArenaStack* const arena_stack_;
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 7c64449dd9..562c2bf01c 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -146,8 +146,8 @@ class ScopedArenaAllocatorAdapter
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ScopedArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index d793bb6153..9c83cf5f71 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -148,6 +148,19 @@ class StringPiece {
StringPiece substr(size_type pos, size_type n = npos) const;
+ int Compare(const StringPiece& rhs) const {
+ const int r = memcmp(data(), rhs.data(), std::min(size(), rhs.size()));
+ if (r != 0) {
+ return r;
+ }
+ if (size() < rhs.size()) {
+ return -1;
+ } else if (size() > rhs.size()) {
+ return 1;
+ }
+ return 0;
+ }
+
private:
// Pointer to char data, not necessarily zero terminated.
const char* ptr_;
@@ -201,9 +214,7 @@ inline bool operator!=(const StringPiece& x, const char* y) {
}
inline bool operator<(const StringPiece& x, const StringPiece& y) {
- const int r = memcmp(x.data(), y.data(),
- std::min(x.size(), y.size()));
- return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+ return x.Compare(y) < 0;
}
inline bool operator>(const StringPiece& x, const StringPiece& y) {
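A brief usage sketch of the new three-way Compare(), which follows memcmp conventions (negative, zero, positive):

StringPiece a("abc");
StringPiece b("abd");
bool lt = a.Compare(b) < 0;          // true: decided by the first differing byte.
StringPiece p("ab");
bool prefix_lt = p.Compare(a) < 0;   // true: equal prefix, shorter piece orders first.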
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e8973511e3..b9ea475149 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
-#include "art_code.h"
#include "art_method-inl.h"
#include "gc_map.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -54,7 +54,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -65,7 +65,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -109,7 +109,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- NativePcOffsetToReferenceMap map(GetCurrentCode().GetNativeGcMap(sizeof(void*)));
+ NativePcOffsetToReferenceMap map(GetCurrentOatQuickMethodHeader()->GetNativeGcMap());
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
CHECK(ref_bitmap);
for (int i = 0; i < number_of_references; ++i) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 02f2e0b207..81622e14ed 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1189,7 +1189,9 @@ mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length
static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
-mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc) {
StackHandleScope<6> hs(self);
auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
@@ -1204,22 +1206,15 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
}
DexCacheArraysLayout layout(image_pointer_size_, &dex_file);
uint8_t* raw_arrays = nullptr;
- if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
+ if (dex_file.GetOatDexFile() != nullptr &&
+ dex_file.GetOatDexFile()->GetDexCacheArrays() != nullptr) {
+ raw_arrays = dex_file.GetOatDexFile()->GetDexCacheArrays();
+ } else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
// NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
- if (sizeof(void*) == 8u && image_pointer_size_ == 4u) {
- // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
- // in the low 4GiB address space so that we can store pointers in 32-bit fields.
- // This is conveniently provided by the linear allocator.
- raw_arrays = reinterpret_cast<uint8_t*>(
- Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size())); // Zero-initialized.
- } else {
- raw_arrays = reinterpret_cast<uint8_t*>(calloc(layout.Size(), 1u)); // Zero-initialized.
- if (raw_arrays == nullptr) {
- return nullptr;
- }
- }
+ // Zero-initialized.
+ raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
@@ -1590,7 +1585,9 @@ mirror::Class* ClassLinker::DefineClass(Thread* self,
self->AssertPendingOOMException();
return nullptr;
}
- mirror::DexCache* dex_cache = RegisterDexFile(dex_file);
+ mirror::DexCache* dex_cache = RegisterDexFile(
+ dex_file,
+ GetOrCreateAllocatorForClassLoader(class_loader.Get()));
if (dex_cache == nullptr) {
self->AssertPendingOOMException();
return nullptr;
@@ -2093,6 +2090,19 @@ LinearAlloc* ClassLinker::GetAllocatorForClassLoader(mirror::ClassLoader* class_
return allocator;
}
+LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) {
+ if (class_loader == nullptr) {
+ return Runtime::Current()->GetLinearAlloc();
+ }
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ LinearAlloc* allocator = class_loader->GetAllocator();
+ if (allocator == nullptr) {
+ allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loader->SetAllocator(allocator);
+ }
+ return allocator;
+}
+
void ClassLinker::LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
@@ -2251,7 +2261,10 @@ void ClassLinker::LoadMethod(Thread* self,
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(
+ self,
+ dex_file,
+ Runtime::Current()->GetLinearAlloc())));
CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
<< dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
@@ -2287,7 +2300,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
dex_cache->SetDexFile(&dex_file);
}
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
@@ -2300,7 +2313,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file, linear_alloc)));
WriterMutexLock mu(self, dex_lock_);
mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
@@ -3097,6 +3110,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
std::string descriptor(GetDescriptorForProxy(klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ // Needs to be before we insert the class so that the allocator field is set.
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
@@ -3104,9 +3120,6 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
CHECK(existing == nullptr);
- // Needs to be after we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
-
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
@@ -3486,28 +3499,31 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
if (!klass->IsInterface()) {
// Initialize interfaces with default methods for the JLS.
size_t num_direct_interfaces = klass->NumDirectInterfaces();
- for (size_t i = 0; i < num_direct_interfaces; i++) {
+    // Only set up the (expensive) handle scope if we actually need it.
+ if (UNLIKELY(num_direct_interfaces > 0)) {
StackHandleScope<1> hs_iface(self);
- Handle<mirror::Class> handle_scope_iface(
- hs_iface.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)));
- CHECK(handle_scope_iface.Get() != nullptr);
- CHECK(handle_scope_iface->IsInterface());
- if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
- // We have already done this once for this interface. Skip it.
- continue;
- }
- // We cannot just call initialize class directly because we need to ensure that ALL interfaces
- // with default methods are initialized. Non-default interface initialization will not affect
- // other non-default super-interfaces.
- bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
- handle_scope_iface,
- can_init_statics,
- can_init_parents);
- if (!iface_initialized) {
- ObjectLock<mirror::Class> lock(self, klass);
- // Initialization failed because one of our interfaces with default methods is erroneous.
- mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
- return false;
+ MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr));
+ for (size_t i = 0; i < num_direct_interfaces; i++) {
+ handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass, i));
+ CHECK(handle_scope_iface.Get() != nullptr);
+ CHECK(handle_scope_iface->IsInterface());
+ if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
+ // We have already done this for this interface. Skip it.
+ continue;
+ }
+ // We cannot just call initialize class directly because we need to ensure that ALL
+ // interfaces with default methods are initialized. Non-default interface initialization
+ // will not affect other non-default super-interfaces.
+ bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
+ handle_scope_iface,
+ can_init_statics,
+ can_init_parents);
+ if (!iface_initialized) {
+ ObjectLock<mirror::Class> lock(self, klass);
+ // Initialization failed because one of our interfaces with default methods is erroneous.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ return false;
+ }
}
}
}
@@ -3609,18 +3625,22 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
bool can_init_parents) {
CHECK(iface->IsInterface());
size_t num_direct_ifaces = iface->NumDirectInterfaces();
- // First we initialize all of iface's super-interfaces recursively.
- for (size_t i = 0; i < num_direct_ifaces; i++) {
- mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
- if (!super_iface->HasBeenRecursivelyInitialized()) {
- // Recursive step
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> handle_super_iface(hs.NewHandle(super_iface));
- if (!InitializeDefaultInterfaceRecursive(self,
- handle_super_iface,
- can_init_statics,
- can_init_parents)) {
- return false;
+ // Only create the (expensive) handle scope if we need it.
+ if (UNLIKELY(num_direct_ifaces > 0)) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
+ // First we initialize all of iface's super-interfaces recursively.
+ for (size_t i = 0; i < num_direct_ifaces; i++) {
+ mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ if (!super_iface->HasBeenRecursivelyInitialized()) {
+ // Recursive step
+ handle_super_iface.Assign(super_iface);
+ if (!InitializeDefaultInterfaceRecursive(self,
+ handle_super_iface,
+ can_init_statics,
+ can_init_parents)) {
+ return false;
+ }
}
}
}
@@ -3945,13 +3965,13 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
ClassLoaderData data;
data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
data.class_table = class_table;
- data.allocator = Runtime::Current()->CreateLinearAlloc();
- class_loaders_.push_back(data);
// We don't already have a class table; add it to the class loader.
CHECK(class_loader->GetClassTable() == nullptr);
- CHECK(class_loader->GetAllocator() == nullptr);
class_loader->SetClassTable(data.class_table);
- class_loader->SetAllocator(data.allocator);
+ // Should have been set when we registered the dex file.
+ data.allocator = class_loader->GetAllocator();
+ CHECK(data.allocator != nullptr);
+ class_loaders_.push_back(data);
}
return class_table;
}
@@ -6349,6 +6369,21 @@ void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
}
}
+void ClassLinker::InsertDexFileInToClassLoader(mirror::Object* dex_file,
+ mirror::ClassLoader* class_loader) {
+ DCHECK(dex_file != nullptr);
+ DCHECK(class_loader != nullptr);
+ Thread* const self = Thread::Current();
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
+ ClassTable* const table = class_loader->GetClassTable();
+ DCHECK(table != nullptr);
+ if (table->InsertDexFile(dex_file)) {
+    // It was not already inserted; perform the write barrier to let the GC know that the
+    // class loader's class table was modified.
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ }
+}
+
void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
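The new AllocDexCache() behavior above, restated as a decision flow (a summary of the patch, not new logic):

// 1. The oat file ships preallocated dex cache arrays:
//      raw_arrays = dex_file.GetOatDexFile()->GetDexCacheArrays();
// 2. Otherwise, if the dex file declares any string/type/method/field ids:
//      raw_arrays = linear_alloc->Alloc(self, layout.Size());  // Zero-initialized.
// 3. Otherwise raw_arrays stays null and the per-kind arrays stay null too.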
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 93161f7bb7..a2d38ac620 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -319,7 +319,7 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- mirror::DexCache* RegisterDexFile(const DexFile& dex_file)
+ mirror::DexCache* RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
@@ -526,12 +526,24 @@ class ClassLinker {
// Clean up class loaders, this needs to happen after JNI weak globals are cleared.
void CleanupClassLoaders()
- SHARED_REQUIRES(Locks::mutator_lock_)
- REQUIRES(!Locks::classlinker_classes_lock_);
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
+ // allocator for this class loader is already created.
static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
+ // set it. TODO: Consider using a lock other than classlinker_classes_lock_.
+ static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void InsertDexFileInToClassLoader(mirror::Object* dex_file, mirror::ClassLoader* class_loader)
+ REQUIRES(!Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
struct ClassLoaderData {
jweak weak_root; // Weak root to enable class unloading.
@@ -570,7 +582,9 @@ class ClassLinker {
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
+ mirror::DexCache* AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index dc60a2c239..aef02b6d5d 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -37,6 +37,9 @@ void ClassTable::VisitRoots(const Visitor& visitor) {
visitor.VisitRoot(root.AddressWithoutBarrier());
}
}
+ for (GcRoot<mirror::Object>& root : dex_files_) {
+ visitor.VisitRoot(root.AddressWithoutBarrier());
+ }
}
} // namespace art
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index fc8e6c49da..3ed1c9540d 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -21,7 +21,9 @@
namespace art {
ClassTable::ClassTable() {
- classes_.push_back(ClassSet());
+ Runtime* const runtime = Runtime::Current();
+ classes_.push_back(ClassSet(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor()));
}
void ClassTable::FreezeSnapshot() {
@@ -135,4 +137,15 @@ std::size_t ClassTable::ClassDescriptorHashEquals::operator()(const char* descri
return ComputeModifiedUtf8Hash(descriptor);
}
+bool ClassTable::InsertDexFile(mirror::Object* dex_file) {
+ DCHECK(dex_file != nullptr);
+ for (GcRoot<mirror::Object>& root : dex_files_) {
+ if (root.Read() == dex_file) {
+ return false;
+ }
+ }
+ dex_files_.push_back(GcRoot<mirror::Object>(dex_file));
+ return true;
+}
+
} // namespace art
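The boolean result feeds the write-barrier decision in InsertDexFileInToClassLoader (see the class_linker.cc hunk above); only a first-time insert dirties the table:

if (table->InsertDexFile(dex_file)) {
  // A new root was added: tell the GC the class loader's fields changed.
  Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
}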
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 727392eb6f..002bb564ab 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -50,12 +50,14 @@ class ClassTable {
// Used by image writer for checking.
bool Contains(mirror::Class* klass)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Freeze the current class tables by allocating a new table and never updating or modifying the
// existing table. This helps prevent dirty pages caused by inserting after the zygote fork.
void FreezeSnapshot()
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
size_t NumZygoteClasses() const SHARED_REQUIRES(Locks::classlinker_classes_lock_);
@@ -65,17 +67,18 @@ class ClassTable {
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock.
template<class Visitor>
void VisitRoots(Visitor& visitor)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS;
+ NO_THREAD_SAFETY_ANALYSIS
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
template<class Visitor>
void VisitRoots(const Visitor& visitor)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS;
+ NO_THREAD_SAFETY_ANALYSIS
+ SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
// Return false if the callback told us to exit.
bool Visit(ClassVisitor* visitor)
@@ -85,13 +88,21 @@ class ClassTable {
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
void Insert(mirror::Class* klass)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
void InsertWithHash(mirror::Class* klass, size_t hash)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Returns true if the class was found and removed, false otherwise.
bool Remove(const char* descriptor)
- REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Return true if we inserted the dex file, false if it already exists.
+ bool InsertDexFile(mirror::Object* dex_file)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
class ClassDescriptorHashEquals {
@@ -123,6 +134,9 @@ class ClassTable {
// TODO: shard lock to have one per class loader.
// We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot.
std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_);
+  // Dex files used by the class loader that may not be owned by it. We keep these live
+  // so that the dex files are not closed while they may still be in use.
+ std::vector<GcRoot<mirror::Object>> dex_files_ GUARDED_BY(Locks::classlinker_classes_lock_);
};
} // namespace art
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a4f95b6d2f..b17b76e2ea 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1903,8 +1903,7 @@ void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::Expa
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
return error;
}
@@ -1931,8 +1930,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 17e6aac357..e57569e140 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,7 +16,6 @@
#include "entrypoints/entrypoint_utils.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/mutex.h"
@@ -31,6 +30,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
+#include "oat_quick_method_header.h"
#include "reflection.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
@@ -359,33 +359,31 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- ArtCode current_code = GetCallingCodeFrom(caller_sp);
+ const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+ uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
+ (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
ArtMethod* caller = outer_method;
- if ((outer_method != nullptr) && current_code.IsOptimized(sizeof(void*))) {
- const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
- uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
- (reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
- if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- uintptr_t native_pc_offset = current_code.NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code.GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
- }
- } else {
- // We're instrumenting, just use the StackVisitor which knows how to
- // handle instrumented frames.
- NthCallerVisitor visitor(Thread::Current(), 1, true);
- visitor.WalkStack();
- caller = visitor.caller;
- if (kIsDebugBuild) {
- // Avoid doing the check below.
- do_caller_check = false;
+ if (outer_method != nullptr) {
+ const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
+ if (current_code->IsOptimized()) {
+ if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
+ uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ DCHECK(stack_map.IsValid());
+ if (stack_map.HasInlineInfo(encoding)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
+ }
+ } else {
+ // We're instrumenting, just use the StackVisitor which knows how to
+ // handle instrumented frames.
+ NthCallerVisitor visitor(Thread::Current(), 1, true);
+ visitor.WalkStack();
+ caller = visitor.caller;
}
}
}
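The caller_pc load above assumes the standard callee-save frame layout; a hedged sketch (stack grows toward lower addresses):

//   caller_sp = sp + callee_frame_size    --> caller's ArtMethod* slot (outer_method)
//   sp + callee_return_pc_offset          --> return PC into the caller's code (caller_pc)
//   ... spilled callee-save registers ...
//   sp                                    --> callee-save ArtMethod* (frame base)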
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 171ace27a5..0469ee6eb6 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,7 +20,6 @@
#include <jni.h>
#include <stdint.h>
-#include "art_code.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_instruction.h"
@@ -40,6 +39,7 @@ namespace mirror {
class ArtField;
class ArtMethod;
+class OatQuickMethodHeader;
class ScopedObjectAccessAlreadyRunnable;
class Thread;
@@ -185,10 +185,6 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check = false);
-inline ArtCode GetCallingCodeFrom(ArtMethod** sp) {
- return ArtCode(sp);
-}
-
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 28c62a8524..4e4f8510ec 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -56,9 +56,8 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -83,9 +82,8 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
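
The many UNUSED(x) removals in this change all follow one pattern; a minimal sketch is below (the macro mirrors the one in ART's base/macros.h, the function itself is hypothetical):

    #include <cstdio>

    #define ATTRIBUTE_UNUSED __attribute__((__unused__))

    // Annotating the parameter replaces a UNUSED(flags); statement in the body:
    // -Wunused-parameter stays quiet and the signature is unchanged.
    static int Scale(int value, int flags ATTRIBUTE_UNUSED) {
      return value * 2;
    }

    int main() {
      printf("%d\n", Scale(21, 0));  // prints 42
      return 0;
    }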
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 377675ea85..5eda6d6bd3 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "art_code.h"
#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
@@ -30,6 +29,7 @@
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -295,8 +295,6 @@ class QuickArgumentVisitor {
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
- CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
- GetCallingCodeFrom(sp).GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -323,10 +321,11 @@ class QuickArgumentVisitor {
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
- uintptr_t outer_pc_offset = GetCallingCodeFrom(caller_sp).NativeQuickPcOffset(outer_pc);
+ const OatQuickMethodHeader* current_code = (*caller_sp)->GetOatQuickMethodHeader(outer_pc);
+ uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
- if (GetCallingCodeFrom(caller_sp).IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetCallingCodeFrom(caller_sp).GetOptimizedCodeInfo();
+ if (current_code->IsOptimized()) {
+ CodeInfo code_info = current_code->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -337,7 +336,7 @@ class QuickArgumentVisitor {
return stack_map.GetDexPc(encoding);
}
} else {
- return GetCallingCodeFrom(caller_sp).ToDexPc(outer_pc);
+ return current_code->ToDexPc(*caller_sp, outer_pc);
}
}
@@ -842,10 +841,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
- DCHECK_EQ(GetCallingCodeFrom(sp).GetFrameSizeInBytes(),
- ArtCode(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs))
- .GetFrameSizeInBytes())
- << PrettyMethod(proxy_method);
self->VerifyStack();
// Start new JNI local reference state.
JNIEnvExt* env = self->GetJniEnv();
@@ -1524,9 +1519,9 @@ class ComputeNativeCallFrameSize {
return sp8;
}
- virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
+ virtual void WalkHeader(
+ BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(sm);
}
void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 5299394d7c..4e8591339c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -49,7 +49,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec << " ISA " << isa;
@@ -58,8 +58,8 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
- EXPECT_EQ(ArtCode(save_method).GetReturnPcOffset().SizeValue(), pc_offset)
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
+ EXPECT_EQ(frame_info.GetReturnPcOffset(), pc_offset)
<< "Expected and real pc offset differs for " << type
<< " core spills=" << std::hex << frame_info.CoreSpillMask()
<< " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index da1d80ea33..4de8a8ead9 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -26,6 +26,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "mirror/stack_trace_element.h"
+#include "oat_quick_method_header.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
@@ -169,7 +170,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
- QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method);
ASSERT_EQ(kStackAlignment, 16U);
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
@@ -186,15 +187,15 @@ TEST_F(ExceptionTest, StackTraceElement) {
fake_stack.push_back(0);
}
- fake_stack.push_back(
- ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ method_g_, dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
- fake_stack.push_back(
- ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
+ method_g_, dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 30a0983986..52ccbeeca0 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -20,10 +20,10 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
-#include "art_code.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "mirror/class.h"
+#include "oat_quick_method_header.h"
#include "sigchain.h"
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -360,17 +360,17 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
return false;
}
- ArtCode art_code(method_obj);
+ const OatQuickMethodHeader* method_header = method_obj->GetOatQuickMethodHeader(return_pc);
// We can be certain that this is a method now. Check if we have a GC map
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
uint32_t sought_offset = return_pc -
- reinterpret_cast<uintptr_t>(art_code.GetQuickOatEntryPoint(sizeof(void*)));
+ reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
- uint32_t dexpc = art_code.ToDexPc(return_pc, false);
+ uint32_t dexpc = method_header->ToDexPc(method_obj, return_pc, false);
VLOG(signals) << "dexpc: " << dexpc;
return !check_dex_pc || dexpc != DexFile::kDexNoIndex;
}
@@ -406,9 +406,8 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
manager_->AddHandler(this, false);
}
-bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
+bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 277d319035..eb0852af6e 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -41,8 +41,7 @@ class RememberedSetCardVisitor {
explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
: dirty_cards_(dirty_cards) {}
- void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 3d85395377..e747f00c92 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -77,7 +77,8 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
}
extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
- void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
if (used_bytes == 0) {
return;
@@ -86,10 +87,10 @@ extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+extern "C" void DlmallocObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
- UNUSED(start);
- UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 87f1392920..3ce3d634f5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -131,6 +131,7 @@ class RosAlloc {
private:
Slot* next_; // Next slot in the list.
+ friend class RosAlloc;
};
// We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
@@ -302,6 +303,7 @@ class RosAlloc {
// free without traversing the whole free list.
uint32_t size_;
uint32_t padding_ ATTRIBUTE_UNUSED;
+ friend class RosAlloc;
};
// Represents a run of memory slots of the same size.
@@ -482,7 +484,7 @@ class RosAlloc {
static constexpr uint8_t kMagicNumFree = 43;
// The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
- // The number of smaller size brackets that are 16 bytes apart.
+ // The number of smaller size brackets that are the quantum size apart.
static constexpr size_t kNumOfQuantumSizeBrackets = 32;
// The sizes (the slot sizes, in bytes) of the size brackets.
static size_t bracketSizes[kNumOfSizeBrackets];
@@ -520,9 +522,7 @@ class RosAlloc {
}
// Returns true if the given allocation size is for a thread local allocation.
static bool IsSizeForThreadLocal(size_t size) {
- DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
- size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
- bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
+ bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
DCHECK(size > kLargeSizeThreshold ||
(is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
return is_size_for_thread_local;
@@ -634,6 +634,16 @@ class RosAlloc {
// are less than this index. We use shared (current) runs for the rest.
static const size_t kNumThreadLocalSizeBrackets = 8;
+ // The size of the largest bracket we use thread-local runs for.
+ // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
+ static const size_t kMaxThreadLocalBracketSize = 128;
+
+ // The bracket size increment for the brackets of size <= 512 bytes.
+ static constexpr size_t kBracketQuantumSize = 16;
+
+ // Equal to Log2(kBracketQuantumSize).
+ static constexpr size_t kBracketQuantumSizeShift = 4;
+
private:
// The base address of the memory region that's managed by this allocator.
uint8_t* base_;
@@ -770,6 +780,19 @@ class RosAlloc {
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+ static size_t RunFreeListOffset() {
+ return OFFSETOF_MEMBER(Run, free_list_);
+ }
+ static size_t RunFreeListHeadOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
+ }
+ static size_t RunFreeListSizeOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
+ }
+ static size_t RunSlotNextOffset() {
+ return OFFSETOF_MEMBER(Slot, next_);
+ }
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
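
A quick, self-contained sanity check of how the new constants fit together (the index computation is a sketch of what the constants imply, not RosAlloc's exact code): quantum brackets are 16 bytes apart, so the largest of the 8 thread-local brackets is 8 * 16 = 128 bytes, matching kMaxThreadLocalBracketSize.

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t kBracketQuantumSize = 16;
      constexpr size_t kBracketQuantumSizeShift = 4;  // Log2(16)
      constexpr size_t kNumThreadLocalSizeBrackets = 8;
      constexpr size_t kMaxThreadLocalBracketSize = 128;
      // bracketSizes[i] == (i + 1) * 16 for the quantum brackets, so the
      // largest thread-local bracket is bracketSizes[7] == 128.
      static_assert(kNumThreadLocalSizeBrackets * kBracketQuantumSize ==
                    kMaxThreadLocalBracketSize, "matches the comment above");
      // Round a request up to its bracket: 100 bytes lands in the 112-byte
      // bracket, index 6.
      size_t size = 100;
      size_t idx = ((size + kBracketQuantumSize - 1) >> kBracketQuantumSizeShift) - 1;
      assert(idx == 6);
      return 0;
    }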
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 6c32658e43..bb7e854ea1 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,8 +56,7 @@ void StickyMarkSweep::MarkReachableObjects() {
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
-void StickyMarkSweep::Sweep(bool swap_bitmaps) {
- UNUSED(swap_bitmaps);
+void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 3a0d814a20..b1572cc7ea 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -56,7 +56,7 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+ CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index a5dbad9af6..c0810110cc 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -48,9 +48,7 @@ class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
- UNUSED(ptr);
- }
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9e882a898e..bbfcb31ab1 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -31,8 +31,7 @@ class CountObjectsAllocated {
explicit CountObjectsAllocated(size_t* objects_allocated)
: objects_allocated_(objects_allocated) {}
- void operator()(mirror::Object* obj) const {
- UNUSED(obj);
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
++*objects_allocated_;
}
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 2c44da231e..f1d26d9a41 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -105,8 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread) OVERRIDE {
- UNUSED(thread); // Fix cppling bug.
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 42b348ac58..192371fe75 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '2', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2dd2b7d403..ed64d7efbe 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,7 +19,6 @@
#include <sstream>
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "atomic.h"
#include "class_linker.h"
@@ -37,6 +36,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
+#include "oat_quick_method_header.h"
#include "thread.h"
#include "thread_list.h"
@@ -252,7 +252,9 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
instrumentation_stack_->insert(it, instrumentation_frame);
SetReturnPc(instrumentation_exit_pc_);
}
- dex_pcs_.push_back(GetCurrentCode().ToDexPc(last_return_pc_));
+ dex_pcs_.push_back((GetCurrentOatQuickMethodHeader() == nullptr)
+ ? DexFile::kDexNoIndex
+ : GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_));
last_return_pc_ = return_pc;
++instrumentation_stack_depth_;
return true; // Continue.
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 179353e84b..f4658d5342 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -459,4 +459,12 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
}
}
+InternTable::Table::Table() {
+ Runtime* const runtime = Runtime::Current();
+ pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+ post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+}
+
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 24c5af938c..3a4e8d8f11 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -146,6 +146,7 @@ class InternTable {
// weak interns and strong interns.
class Table {
public:
+ Table();
mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 44eb29ed2c..18fb0d8518 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -40,8 +40,9 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
uint16_t inst_data) {
const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -234,8 +235,9 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
bool do_assignability_check = do_access_check;
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -775,7 +777,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
template<bool is_range, bool do_assignability_check>
bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+ const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
const uint4_t num_additional_registers = inst->VRegB_25x();
// Argument word count.
const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
@@ -790,7 +792,6 @@ bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_fr
vregC = inst->VRegC_3rc();
} else {
// TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- UNUSED(inst_data);
inst->GetAllArgs25x(arg);
}
@@ -806,7 +807,8 @@ template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
- const uint16_t number_of_inputs = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ const uint16_t number_of_inputs =
+ (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
// TODO: find a cleaner way to separate non-range and range information without duplicating
// code.
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0a4d6e3d9e..df6936bf01 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1072,9 +1072,8 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand
return WriteTaggedObject(reply, contended_monitor);
}
-static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
+static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
@@ -1172,6 +1171,13 @@ static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pRe
return VM_AllClassesImpl(pReply, false, false);
}
+// Deleter functor so std::unique_ptr can be used with JdwpEvent.
+struct JdwpEventDeleter {
+ void operator()(JdwpEvent* event) {
+ EventFree(event);
+ }
+};
+
/*
* Set an event trigger.
*
@@ -1185,7 +1191,7 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
CHECK_LT(modifier_count, 256); /* reasonableness check */
- JdwpEvent* pEvent = EventAlloc(modifier_count);
+ std::unique_ptr<JDWP::JdwpEvent, JdwpEventDeleter> pEvent(EventAlloc(modifier_count));
pEvent->eventKind = event_kind;
pEvent->suspend_policy = suspend_policy;
pEvent->modCount = modifier_count;
@@ -1294,8 +1300,6 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
break;
default:
LOG(WARNING) << "Unsupported modifier " << mod.modKind << " for event " << pEvent->eventKind;
- // Free allocated event to avoid leak before leaving.
- EventFree(pEvent);
return JDWP::ERR_NOT_IMPLEMENTED;
}
}
@@ -1311,13 +1315,14 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
VLOG(jdwp) << StringPrintf(" --> event requestId=%#x", requestId);
/* add it to the list */
- JdwpError err = state->RegisterEvent(pEvent);
+ JdwpError err = state->RegisterEvent(pEvent.get());
if (err != ERR_NONE) {
/* registration failed, probably because event is bogus */
- EventFree(pEvent);
LOG(WARNING) << "WARNING: event request rejected";
+ return err;
}
- return err;
+ pEvent.release();
+ return ERR_NONE;
}
static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
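
The leak fix in ER_Set is the standard custom-deleter idiom; here is a stand-alone sketch with stand-in types (Event, EventAlloc, EventFree, Register are hypothetical counterparts of JdwpEvent, EventAlloc, EventFree, RegisterEvent):

    #include <memory>

    struct Event { int kind = 0; };
    Event* EventAlloc() { return new Event; }
    void EventFree(Event* e) { delete e; }

    struct EventDeleter {
      void operator()(Event* e) const { EventFree(e); }
    };

    bool Register(Event*) { return true; }

    int main() {
      std::unique_ptr<Event, EventDeleter> event(EventAlloc());
      if (!Register(event.get())) {
        return 1;       // early return: the deleter frees the event
      }
      event.release();  // success: the registry now owns the event
      return 0;
    }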
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4c5316227c..4187358bc0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -25,37 +25,77 @@
namespace art {
namespace jit {
+static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtData = PROT_READ | PROT_WRITE;
+static constexpr int kProtCode = PROT_READ | PROT_EXEC;
+
+#define CHECKED_MPROTECT(memory, size, prot) \
+ do { \
+ int rc = mprotect(memory, size, prot); \
+ if (UNLIKELY(rc != 0)) { \
+ errno = rc; \
+ PLOG(FATAL) << "Failed to mprotect jit code cache"; \
+ } \
+ } while (false) \
+
JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
CHECK_GT(capacity, 0U);
CHECK_LT(capacity, kMaxCapacity);
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
- MemMap* map = MemMap::MapAnonymous("jit-code-cache", nullptr, capacity,
- PROT_READ | PROT_WRITE | PROT_EXEC, false, false, &error_str);
- if (map == nullptr) {
+ MemMap* data_map = MemMap::MapAnonymous(
+ "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
+ if (data_map == nullptr) {
+ std::ostringstream oss;
+ oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
+ // Data cache is 1 / 4 of the map.
+ // TODO: Make this variable?
+ size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
+ size_t code_size = data_map->Size() - data_size;
+ uint8_t* divider = data_map->Begin() + data_size;
+
+ // We need to have 32 bit offsets from method headers in code cache which point to things
+ // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+ MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
+ if (code_map == nullptr) {
std::ostringstream oss;
oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
return nullptr;
}
- return new JitCodeCache(map);
+ DCHECK_EQ(code_map->Size(), code_size);
+ DCHECK_EQ(code_map->Begin(), divider);
+ return new JitCodeCache(code_map, data_map);
}
-JitCodeCache::JitCodeCache(MemMap* mem_map)
- : lock_("Jit code cache", kJitCodeCacheLock), num_methods_(0) {
- VLOG(jit) << "Created jit code cache size=" << PrettySize(mem_map->Size());
- mem_map_.reset(mem_map);
- uint8_t* divider = mem_map->Begin() + RoundUp(mem_map->Size() / 4, kPageSize);
- // Data cache is 1 / 4 of the map. TODO: Make this variable?
- // Put data at the start.
- data_cache_ptr_ = mem_map->Begin();
- data_cache_end_ = divider;
- data_cache_begin_ = data_cache_ptr_;
- mprotect(data_cache_ptr_, data_cache_end_ - data_cache_begin_, PROT_READ | PROT_WRITE);
- // Code cache after.
- code_cache_begin_ = divider;
- code_cache_ptr_ = divider;
- code_cache_end_ = mem_map->End();
+JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
+ : lock_("Jit code cache", kJitCodeCacheLock),
+ code_map_(code_map),
+ data_map_(data_map),
+ num_methods_(0) {
+
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
+
+ code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
+ data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
+
+ if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
+ PLOG(FATAL) << "create_mspace_with_base failed";
+ }
+
+ // Prevent morecore requests from the mspace.
+ mspace_set_footprint_limit(code_mspace_, code_map_->Size());
+ mspace_set_footprint_limit(data_mspace_, data_map_->Size());
+
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
}
bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
@@ -63,44 +103,97 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
}
bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
- return ptr >= code_cache_begin_ && ptr < code_cache_end_;
+ return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
-void JitCodeCache::FlushInstructionCache() {
- UNIMPLEMENTED(FATAL);
- // TODO: Investigate if we need to do this.
- // __clear_cache(reinterpret_cast<char*>(code_cache_begin_), static_cast<int>(CodeCacheSize()));
-}
+class ScopedCodeCacheWrite {
+ public:
+ explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
+ }
+ ~ScopedCodeCacheWrite() {
+ CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
+ }
+ private:
+ MemMap* const code_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+uint8_t* JitCodeCache::CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ // Ensure the header ends up at expected instruction alignment.
+ size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
+ size_t total_size = header_size + code_size;
+
+ OatQuickMethodHeader* method_header = nullptr;
+ uint8_t* code_ptr = nullptr;
-uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
MutexLock mu(self, lock_);
- if (size > CodeCacheRemain()) {
- return nullptr;
+ {
+ ScopedCodeCacheWrite scc(code_map_.get());
+ uint8_t* result = reinterpret_cast<uint8_t*>(
+ mspace_memalign(code_mspace_, alignment, total_size));
+ if (result == nullptr) {
+ return nullptr;
+ }
+ code_ptr = result + header_size;
+ DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
+
+ std::copy(code, code + code_size, code_ptr);
+ method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ new (method_header) OatQuickMethodHeader(
+ (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
+ (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
+ (gc_map == nullptr) ? 0 : code_ptr - gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code_size);
}
+
+ __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
+ reinterpret_cast<char*>(code_ptr + code_size));
+
++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
- code_cache_ptr_ += size;
- return code_cache_ptr_ - size;
+ return reinterpret_cast<uint8_t*>(method_header);
+}
+
+size_t JitCodeCache::CodeCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
+}
+
+size_t JitCodeCache::DataCacheSize() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t bytes_allocated = 0;
+ mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+ return bytes_allocated;
}
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
- MutexLock mu(self, lock_);
size = RoundUp(size, sizeof(void*));
- if (size > DataCacheRemain()) {
- return nullptr;
- }
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ MutexLock mu(self, lock_);
+ return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
- MutexLock mu(self, lock_);
- const size_t size = RoundUp(end - begin, sizeof(void*));
- if (size > DataCacheRemain()) {
+ uint8_t* result = ReserveData(self, end - begin);
+ if (result == nullptr) {
return nullptr; // Out of space in the data cache.
}
- std::copy(begin, end, data_cache_ptr_);
- data_cache_ptr_ += size;
- return data_cache_ptr_ - size;
+ std::copy(begin, end, result);
+ return result;
}
const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
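
Stripped of ART specifics, the write-protection scheme introduced here is an RAII mprotect toggle: the code map normally stays readable and executable, and writers briefly flip it to RWX, as ScopedCodeCacheWrite does above. A minimal POSIX sketch (hypothetical names, error handling elided where the real code uses CHECKED_MPROTECT):

    #include <sys/mman.h>
    #include <cstddef>

    class ScopedWritableCode {
     public:
      ScopedWritableCode(void* base, size_t size) : base_(base), size_(size) {
        mprotect(base_, size_, PROT_READ | PROT_WRITE | PROT_EXEC);
      }
      ~ScopedWritableCode() {
        // Drop write permission again as soon as the writer goes out of scope.
        mprotect(base_, size_, PROT_READ | PROT_EXEC);
      }
     private:
      void* const base_;
      const size_t size_;
    };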
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index f485e4aded..fa90c1806f 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
#include "oat_file.h"
@@ -48,33 +49,25 @@ class JitCodeCache {
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- const uint8_t* CodeCachePtr() const {
- return code_cache_ptr_;
- }
-
- size_t CodeCacheSize() const {
- return code_cache_ptr_ - code_cache_begin_;
- }
-
- size_t CodeCacheRemain() const {
- return code_cache_end_ - code_cache_ptr_;
- }
-
- const uint8_t* DataCachePtr() const {
- return data_cache_ptr_;
+ size_t NumMethods() const {
+ return num_methods_;
}
- size_t DataCacheSize() const {
- return data_cache_ptr_ - data_cache_begin_;
- }
+ size_t CodeCacheSize() REQUIRES(!lock_);
- size_t DataCacheRemain() const {
- return data_cache_end_ - data_cache_ptr_;
- }
+ size_t DataCacheSize() REQUIRES(!lock_);
- size_t NumMethods() const {
- return num_methods_;
- }
+ // Allocate and write code and its metadata to the code cache.
+ uint8_t* CommitCode(Thread* self,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_);
// Return true if the code cache contains the code pointer which is the entrypoint of the method.
bool ContainsMethod(ArtMethod* method) const
@@ -83,9 +76,6 @@ class JitCodeCache {
// Return true if the code cache contains a code ptr.
bool ContainsCodePtr(const void* ptr) const;
- // Reserve a region of code of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveCode(Thread* self, size_t size) REQUIRES(!lock_);
-
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
@@ -105,25 +95,19 @@ class JitCodeCache {
private:
// Takes ownership of code_mem_map.
- explicit JitCodeCache(MemMap* code_mem_map);
-
- // Unimplemented, TODO: Determine if it is necessary.
- void FlushInstructionCache();
+ JitCodeCache(MemMap* code_map, MemMap* data_map);
// Lock which guards the code and data caches.
Mutex lock_;
- // Mem map which holds code and data. We do this since we need to have 32 bit offsets from method
- // headers in code cache which point to things in the data cache. If the maps are more than 4GB
- // apart, having multiple maps wouldn't work.
- std::unique_ptr<MemMap> mem_map_;
- // Code cache section.
- uint8_t* code_cache_ptr_;
- const uint8_t* code_cache_begin_;
- const uint8_t* code_cache_end_;
- // Data cache section.
- uint8_t* data_cache_ptr_;
- const uint8_t* data_cache_begin_;
- const uint8_t* data_cache_end_;
+ // Mem map which holds code.
+ std::unique_ptr<MemMap> code_map_;
+ // Mem map which holds data (stack maps and profiling info).
+ std::unique_ptr<MemMap> data_map_;
+ // The opaque mspace for allocating code.
+ void* code_mspace_;
+ // The opaque mspace for allocating data.
+ void* data_mspace_;
+ // Number of compiled methods.
size_t num_methods_;
// This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
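
A toy illustration of the layout CommitCode produces (Header is a hypothetical stand-in for OatQuickMethodHeader, and the real code rounds the header size up to the instruction-set alignment): the header is written immediately before the instructions, so stepping one header back from a code pointer recovers the metadata.

    #include <cassert>
    #include <cstdint>

    struct Header { uint32_t code_size; };

    int main() {
      alignas(16) uint8_t buffer[64];               // pretend mspace block
      uint8_t* code_ptr = buffer + sizeof(Header);  // instructions start here
      Header* header = reinterpret_cast<Header*>(code_ptr) - 1;
      header->code_size = 8;
      assert(reinterpret_cast<uint8_t*>(header) == buffer);
      return 0;
    }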
diff --git a/runtime/jit/jit_code_cache_test.cc b/runtime/jit/jit_code_cache_test.cc
deleted file mode 100644
index c76dc1110a..0000000000
--- a/runtime/jit/jit_code_cache_test.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_runtime_test.h"
-
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "jit_code_cache.h"
-#include "scoped_thread_state_change.h"
-#include "thread-inl.h"
-
-namespace art {
-namespace jit {
-
-class JitCodeCacheTest : public CommonRuntimeTest {
- public:
-};
-
-TEST_F(JitCodeCacheTest, TestCoverage) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- ASSERT_EQ(code_cache->CodeCacheSize(), 0u);
- ASSERT_GT(code_cache->CodeCacheRemain(), 0u);
- ASSERT_TRUE(code_cache->DataCachePtr() != nullptr);
- ASSERT_EQ(code_cache->DataCacheSize(), 0u);
- ASSERT_GT(code_cache->DataCacheRemain(), 0u);
- ASSERT_EQ(code_cache->CodeCacheRemain() + code_cache->DataCacheRemain(), kSize);
- ASSERT_EQ(code_cache->NumMethods(), 0u);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- uint8_t* const reserved_code = code_cache->ReserveCode(soa.Self(), 4 * KB);
- ASSERT_TRUE(reserved_code != nullptr);
- ASSERT_TRUE(code_cache->ContainsCodePtr(reserved_code));
- ASSERT_EQ(code_cache->NumMethods(), 1u);
- Runtime* const runtime = Runtime::Current();
- ClassLinker* const class_linker = runtime->GetClassLinker();
- ArtMethod* method = &class_linker->AllocArtMethodArray(soa.Self(),
- runtime->GetLinearAlloc(),
- 1)->At(0);
- ASSERT_FALSE(code_cache->ContainsMethod(method));
- method->SetEntryPointFromQuickCompiledCode(reserved_code);
- ASSERT_TRUE(code_cache->ContainsMethod(method));
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- // Save the code and then change it.
- code_cache->SaveCompiledCode(method, reserved_code);
- method->SetEntryPointFromQuickCompiledCode(nullptr);
- ASSERT_EQ(code_cache->GetCodeFor(method), reserved_code);
- const uint8_t data_arr[] = {1, 2, 3, 4, 5};
- uint8_t* data_ptr = code_cache->AddDataArray(soa.Self(), data_arr, data_arr + sizeof(data_arr));
- ASSERT_TRUE(data_ptr != nullptr);
- ASSERT_EQ(memcmp(data_ptr, data_arr, sizeof(data_arr)), 0);
-}
-
-TEST_F(JitCodeCacheTest, TestOverflow) {
- std::string error_msg;
- constexpr size_t kSize = 1 * MB;
- std::unique_ptr<JitCodeCache> code_cache(
- JitCodeCache::Create(kSize, &error_msg));
- ASSERT_TRUE(code_cache.get() != nullptr) << error_msg;
- ASSERT_TRUE(code_cache->CodeCachePtr() != nullptr);
- size_t code_bytes = 0;
- size_t data_bytes = 0;
- constexpr size_t kCodeArrSize = 4 * KB;
- constexpr size_t kDataArrSize = 4 * KB;
- uint8_t data_arr[kDataArrSize];
- std::fill_n(data_arr, arraysize(data_arr), 53);
- // Add code and data until we are full.
- uint8_t* code_ptr = nullptr;
- uint8_t* data_ptr = nullptr;
- do {
- code_ptr = code_cache->ReserveCode(Thread::Current(), kCodeArrSize);
- data_ptr = code_cache->AddDataArray(Thread::Current(), data_arr, data_arr + kDataArrSize);
- if (code_ptr != nullptr) {
- code_bytes += kCodeArrSize;
- }
- if (data_ptr != nullptr) {
- data_bytes += kDataArrSize;
- }
- } while (code_ptr != nullptr || data_ptr != nullptr);
- // Make sure we added a reasonable amount
- CHECK_GT(code_bytes, 0u);
- CHECK_LE(code_bytes, kSize);
- CHECK_GT(data_bytes, 0u);
- CHECK_LE(data_bytes, kSize);
- CHECK_GE(code_bytes + data_bytes, kSize * 4 / 5);
-}
-
-} // namespace jit
-} // namespace art
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 4104d7a0e8..dab10403af 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -93,8 +93,7 @@ void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
+void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6bc18291cb..234a733967 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1743,8 +1743,9 @@ class JNI {
return static_cast<jchar*>(s->GetValue());
}
- static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
- UNUSED(chars);
+ static void ReleaseStringCritical(JNIEnv* env,
+ jstring java_string,
+ const jchar* chars ATTRIBUTE_UNUSED) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 6a6d1986dc..7b91b0b2b6 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -32,7 +32,7 @@ union PACKED(4) JValue {
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
- i = ((static_cast<int32_t>(new_b) << 24) >> 24); // Sign-extend.
+ j = ((static_cast<int64_t>(new_b) << 56) >> 56); // Sign-extend to 64 bits.
}
uint16_t GetC() const { return c; }
@@ -45,7 +45,9 @@ union PACKED(4) JValue {
void SetF(float new_f) { f = new_f; }
int32_t GetI() const { return i; }
- void SetI(int32_t new_i) { i = new_i; }
+ void SetI(int32_t new_i) {
+ j = ((static_cast<int64_t>(new_i) << 32) >> 32); // Sign-extend to 64 bits.
+ }
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
@@ -55,7 +57,7 @@ union PACKED(4) JValue {
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
- i = ((static_cast<int32_t>(new_s) << 16) >> 16); // Sign-extend.
+ j = ((static_cast<int64_t>(new_s) << 48) >> 48); // Sign-extend to 64 bits.
}
uint8_t GetZ() const { return z; }
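
A small self-contained check of the widening the setters now perform; like the patch, it assumes a two's-complement target with arithmetic right shift. On 64-bit hosts the union's int64_t member aliases the narrower fields, so a 32-bit store would leave stale upper bits behind.

    #include <cassert>
    #include <cstdint>

    int main() {
      int8_t b = -5;
      // The shift pair sign-extends the byte into all 64 bits...
      int64_t widened = (static_cast<int64_t>(b) << 56) >> 56;
      assert(widened == -5);
      // ...which is the same value a plain integral conversion produces.
      assert(widened == static_cast<int64_t>(b));
      return 0;
    }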
diff --git a/runtime/leb128_test.cc b/runtime/leb128_test.cc
index 09f7ecc5d6..122f55ebee 100644
--- a/runtime/leb128_test.cc
+++ b/runtime/leb128_test.cc
@@ -88,7 +88,7 @@ static DecodeSignedLeb128TestCase sleb128_tests[] = {
{-0x08000000, {0x80, 0x80, 0x80, 0x40, 0}},
{-0x08000001, {0xFF, 0xFF, 0xFF, 0xBF, 0x7F}},
{-0x20000000, {0x80, 0x80, 0x80, 0x80, 0x7E}},
- {(-1) << 31, {0x80, 0x80, 0x80, 0x80, 0x78}},
+ {static_cast<int32_t>(0x80000000), {0x80, 0x80, 0x80, 0x80, 0x78}},
};
TEST(Leb128Test, UnsignedSinglesVector) {
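
The replaced test constant removes undefined behavior: left-shifting a negative value, as in (-1) << 31, is undefined in C++, while converting the unsigned bit pattern is merely implementation-defined and yields INT32_MIN on two's-complement targets.

    #include <cstdint>

    static_assert(static_cast<int32_t>(0x80000000u) == INT32_MIN,
                  "two's-complement conversion assumed");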
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 3d540297e5..ec7d758ebb 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,9 +100,8 @@ class SetLengthVisitor {
explicit SetLengthVisitor(int32_t length) : length_(length) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2ac44fc041..53fedab377 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -565,24 +565,58 @@ ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
return nullptr;
}
-ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
- // Is the field in this class?
- // Interfaces are not relevant because they can't contain instance fields.
- for (size_t i = 0; i < NumInstanceFields(); ++i) {
- ArtField* f = GetInstanceField(i);
- if (name == f->GetName() && type == f->GetTypeDescriptor()) {
- return f;
+// Custom binary search to avoid double comparisons from std::binary_search.
+static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
+ const StringPiece& name,
+ const StringPiece& type)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (fields == nullptr) {
+ return nullptr;
+ }
+ size_t low = 0;
+ size_t high = fields->Length();
+ ArtField* ret = nullptr;
+ while (low < high) {
+ size_t mid = (low + high) / 2;
+ ArtField& field = fields->At(mid);
+ // Fields are sorted by class, then name, then type descriptor; this is verified by the dex
+ // file verifier. Proguard can produce multiple fields with the same name in the same class.
+ int result = StringPiece(field.GetName()).Compare(name);
+ if (result == 0) {
+ result = StringPiece(field.GetTypeDescriptor()).Compare(type);
+ }
+ if (result < 0) {
+ low = mid + 1;
+ } else if (result > 0) {
+ high = mid;
+ } else {
+ ret = &field;
+ break;
}
}
- return nullptr;
+ if (kIsDebugBuild) {
+ ArtField* found = nullptr;
+ for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
+ if (name == field.GetName() && type == field.GetTypeDescriptor()) {
+ found = &field;
+ break;
+ }
+ }
+ CHECK_EQ(found, ret) << "Found " << PrettyField(found) << " vs " << PrettyField(ret);
+ }
+ return ret;
+}
+
+ArtField* Class::FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type) {
+ // Binary search by name. Interfaces are not relevant because they can't contain instance fields.
+ return FindFieldByNameAndType(GetIFieldsPtr(), name, type);
}
ArtField* Class::FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx) {
if (GetDexCache() == dex_cache) {
- for (size_t i = 0; i < NumInstanceFields(); ++i) {
- ArtField* f = GetInstanceField(i);
- if (f->GetDexFieldIndex() == dex_field_idx) {
- return f;
+ for (ArtField& field : GetIFields()) {
+ if (field.GetDexFieldIndex() == dex_field_idx) {
+ return &field;
}
}
}
@@ -615,21 +649,14 @@ ArtField* Class::FindInstanceField(const DexCache* dex_cache, uint32_t dex_field
ArtField* Class::FindDeclaredStaticField(const StringPiece& name, const StringPiece& type) {
DCHECK(type != nullptr);
- for (size_t i = 0; i < NumStaticFields(); ++i) {
- ArtField* f = GetStaticField(i);
- if (name == f->GetName() && type == f->GetTypeDescriptor()) {
- return f;
- }
- }
- return nullptr;
+ return FindFieldByNameAndType(GetSFieldsPtr(), name, type);
}
ArtField* Class::FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx) {
if (dex_cache == GetDexCache()) {
- for (size_t i = 0; i < NumStaticFields(); ++i) {
- ArtField* f = GetStaticField(i);
- if (f->GetDexFieldIndex() == dex_field_idx) {
- return f;
+ for (ArtField& field : GetSFields()) {
+ if (field.GetDexFieldIndex() == dex_field_idx) {
+ return &field;
}
}
}
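
The single-comparator trick generalizes: ordering by name and breaking ties by type lets one binary search find an exact (name, type) match. A self-contained sketch with stand-in types:

    #include <string>
    #include <vector>

    struct Field { std::string name, type; };

    // 'sorted' must be ordered by (name, type), as dex fields are.
    const Field* Find(const std::vector<Field>& sorted,
                      const std::string& name, const std::string& type) {
      size_t lo = 0, hi = sorted.size();
      while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;
        int c = sorted[mid].name.compare(name);
        if (c == 0) c = sorted[mid].type.compare(type);  // tie-break on type
        if (c < 0) {
          lo = mid + 1;
        } else if (c > 0) {
          hi = mid;
        } else {
          return &sorted[mid];
        }
      }
      return nullptr;  // not declared here
    }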
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8fb860fa6b..48f2ca59e8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -20,9 +20,8 @@
#include "class_linker.h"
#include "common_runtime_test.h"
-#include "gc/heap.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "linear_alloc.h"
+#include "mirror/class_loader-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -36,7 +35,9 @@ TEST_F(DexCacheTest, Open) {
StackHandleScope<1> hs(soa.Self());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
- hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(),
+ *java_lang_dex_file_,
+ Runtime::Current()->GetLinearAlloc())));
ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
@@ -45,5 +46,21 @@ TEST_F(DexCacheTest, Open) {
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
}
+TEST_F(DexCacheTest, LinearAlloc) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader(LoadDex("Main"));
+ ASSERT_TRUE(jclass_loader != nullptr);
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LMain;", class_loader);
+ ASSERT_TRUE(klass != nullptr);
+ LinearAlloc* const linear_alloc = klass->GetClassLoader()->GetAllocator();
+ EXPECT_NE(linear_alloc, runtime->GetLinearAlloc());
+ EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods()));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 255a0f23d8..81e7e6d675 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -274,7 +274,7 @@ void Monitor::Lock(Thread* self) {
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
const char* owners_filename;
- uint32_t owners_line_number;
+ int32_t owners_line_number;
TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
LOG(WARNING) << "Long monitor contention event with owner method="
@@ -1086,7 +1086,7 @@ bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
}
void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc,
- const char** source_file, uint32_t* line_number) const {
+ const char** source_file, int32_t* line_number) const {
// If method is null, location is unknown
if (method == nullptr) {
*source_file = "";
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 61235efd89..707d0f112c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -183,7 +183,7 @@ class Monitor {
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number)
+ const char* owner_filename, int32_t owner_line_number)
SHARED_REQUIRES(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
@@ -235,7 +235,7 @@ class Monitor {
// Translates the provided method and pc into its declaring class' source file and line number.
void TranslateLocation(ArtMethod* method, uint32_t pc,
- const char** source_file, uint32_t* line_number) const
+ const char** source_file, int32_t* line_number) const
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index efe2e823d9..82ef2d841a 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -50,7 +50,7 @@ static char* EventLogWriteString(char* dst, const char* value, size_t len) {
}
void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number) {
+ const char* owner_filename, int32_t owner_line_number) {
// Emit the event list length, 1 byte.
char eventBuffer[174];
char* cp = eventBuffer;
@@ -80,7 +80,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
const char* filename;
- uint32_t line_number;
+ int32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
cp = EventLogWriteString(cp, filename, strlen(filename));
diff --git a/runtime/monitor_linux.cc b/runtime/monitor_linux.cc
index 856ebe45f9..1c77ac0eb3 100644
--- a/runtime/monitor_linux.cc
+++ b/runtime/monitor_linux.cc
@@ -18,7 +18,7 @@
namespace art {
-void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, uint32_t) {
+void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, int32_t) {
}
} // namespace art
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 1a6beadd37..8b2f4d8d24 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -74,7 +74,7 @@ static jlongArray ConvertDexFilesToJavaArray(JNIEnv* env,
const OatFile* oat_file,
std::vector<std::unique_ptr<const DexFile>>& vec) {
// Add one for the oat file.
- jlongArray long_array = env->NewLongArray(static_cast<jsize>(1u + vec.size()));
+ jlongArray long_array = env->NewLongArray(static_cast<jsize>(kDexFileIndexStart + vec.size()));
if (env->ExceptionCheck() == JNI_TRUE) {
return nullptr;
}
@@ -230,7 +230,8 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
}
}
- if (all_deleted) {
+ // oat_file can be null if we are running without dex2oat.
+ if (all_deleted && oat_file != nullptr) {
// If all of the dex files are no longer in use we can unmap the corresponding oat file.
VLOG(class_linker) << "Unregistering " << oat_file;
runtime->GetOatFileManager().UnRegisterAndDeleteOatFile(oat_file);
@@ -242,7 +243,8 @@ static jclass DexFile_defineClassNative(JNIEnv* env,
jclass,
jstring javaName,
jobject javaLoader,
- jobject cookie) {
+ jobject cookie,
+ jobject dexFile) {
std::vector<const DexFile*> dex_files;
const OatFile* oat_file;
if (!ConvertJavaArrayToDexFiles(env, cookie, /*out*/ dex_files, /*out*/ oat_file)) {
@@ -263,16 +265,22 @@ static jclass DexFile_defineClassNative(JNIEnv* env,
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->RegisterDexFile(*dex_file);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
+ class_linker->RegisterDexFile(
+ *dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()));
mirror::Class* result = class_linker->DefineClass(soa.Self(),
descriptor.c_str(),
hash,
class_loader,
*dex_file,
*dex_class_def);
+ // Add the used dex file. This is only required for the DexFile.loadClass API since normal
+ // class loaders already keep their dex files live.
+ class_linker->InsertDexFileInToClassLoader(soa.Decode<mirror::Object*>(dexFile),
+ class_loader.Get());
if (result != nullptr) {
VLOG(class_linker) << "DexFile_defineClassNative returning " << result
<< " for " << class_name.c_str();
@@ -421,8 +429,13 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"),
- NATIVE_METHOD(DexFile, defineClassNative,
- "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Object;)Ljava/lang/Class;"),
+ NATIVE_METHOD(DexFile,
+ defineClassNative,
+ "(Ljava/lang/String;"
+ "Ljava/lang/ClassLoader;"
+ "Ljava/lang/Object;"
+ "Ldalvik/system/DexFile;"
+ ")Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(Ljava/lang/Object;)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(DexFile, getDexOptNeeded,
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4f957233c4..4c5dc3ad25 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -497,7 +497,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc())));
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 5725b6ff6c..40aca0d249 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -134,6 +134,9 @@ bool OatHeader::IsValid() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return false;
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return false;
+ }
return true;
}
@@ -156,6 +159,9 @@ std::string OatHeader::GetValidationErrorMessage() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return "Image patch delta not page-aligned.";
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return StringPrintf("Invalid instruction set, %d.", static_cast<int>(instruction_set_));
+ }
return "";
}
@@ -477,15 +483,4 @@ OatMethodOffsets::OatMethodOffsets(uint32_t code_offset) : code_offset_(code_off
OatMethodOffsets::~OatMethodOffsets() {}
-OatQuickMethodHeader::OatQuickMethodHeader(
- uint32_t mapping_table_offset, uint32_t vmap_table_offset, uint32_t gc_map_offset,
- uint32_t frame_size_in_bytes, uint32_t core_spill_mask, uint32_t fp_spill_mask,
- uint32_t code_size)
- : mapping_table_offset_(mapping_table_offset), vmap_table_offset_(vmap_table_offset),
- gc_map_offset_(gc_map_offset),
- frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask), code_size_(code_size) {
-}
-
-OatQuickMethodHeader::~OatQuickMethodHeader() {}
-
} // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 2aa5783bde..276e7f3ea5 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -22,7 +22,6 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "dex_file.h"
-#include "quick/quick_method_frame_info.h"
#include "safe_map.h"
namespace art {
@@ -170,30 +169,6 @@ class PACKED(4) OatMethodOffsets {
uint32_t code_offset_;
};
-// OatQuickMethodHeader precedes the raw code chunk generated by the Quick compiler.
-class PACKED(4) OatQuickMethodHeader {
- public:
- OatQuickMethodHeader(uint32_t mapping_table_offset = 0U, uint32_t vmap_table_offset = 0U,
- uint32_t gc_map_offset = 0U, uint32_t frame_size_in_bytes = 0U,
- uint32_t core_spill_mask = 0U, uint32_t fp_spill_mask = 0U,
- uint32_t code_size = 0U);
-
- ~OatQuickMethodHeader();
-
- OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
-
- // The offset in bytes from the start of the mapping table to the end of the header.
- uint32_t mapping_table_offset_;
- // The offset in bytes from the start of the vmap table to the end of the header.
- uint32_t vmap_table_offset_;
- // The offset in bytes from the start of the gc map to the end of the header.
- uint32_t gc_map_offset_;
- // The stack frame information.
- QuickMethodFrameInfo frame_info_;
- // The code size in bytes.
- uint32_t code_size_;
-};
-
} // namespace art
#endif // ART_RUNTIME_OAT_H_
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index f7913e177a..7b92120fde 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_OAT_FILE_INL_H_
#include "oat_file.h"
+#include "oat_quick_method_header.h"
namespace art {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 6cbbce9bb1..a162a4ea72 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,6 +18,7 @@
#include <dlfcn.h>
#include <string.h>
+#include <type_traits>
#include <unistd.h>
#include <cstdlib>
@@ -46,6 +47,7 @@
#include "os.h"
#include "runtime.h"
#include "utils.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "vmap_table.h"
namespace art {
@@ -247,10 +249,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is similar API on OSX,
// but let's fallback to the custom loading code for the time being.
- UNUSED(elf_filename);
- UNUSED(requested_base);
- UNUSED(abs_dex_location);
- UNUSED(error_msg);
+ UNUSED(elf_filename, requested_base, abs_dex_location, error_msg);
return false;
#else
{
@@ -390,13 +389,13 @@ bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file
// Readjust to be non-inclusive upper bound.
end_ += sizeof(uint32_t);
- bss_begin_ = elf_file_->FindDynamicSymbolAddress("oatbss");
+ bss_begin_ = const_cast<uint8_t*>(elf_file_->FindDynamicSymbolAddress("oatbss"));
if (bss_begin_ == nullptr) {
// No .bss section. Clear dlerror().
bss_end_ = nullptr;
dlerror();
} else {
- bss_end_ = elf_file_->FindDynamicSymbolAddress("oatbsslastword");
+ bss_end_ = const_cast<uint8_t*>(elf_file_->FindDynamicSymbolAddress("oatbsslastword"));
if (bss_end_ == nullptr) {
*error_msg = StringPrintf("Failed to find oatbasslastword symbol in '%s'",
file->GetPath().c_str());
@@ -409,10 +408,31 @@ bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file
return Setup(abs_dex_location, error_msg);
}
+// Read an unaligned entry from the OatDexFile data in OatFile and advance the read
+// position by the number of bytes read, i.e. sizeof(T).
+// Return true on success, false if the read would go beyond the end of the OatFile.
+template <typename T>
+inline static bool ReadOatDexFileData(const OatFile& oat_file,
+ /*inout*/const uint8_t** oat,
+ /*out*/T* value) {
+ DCHECK(oat != nullptr);
+ DCHECK(value != nullptr);
+ DCHECK_LE(*oat, oat_file.End());
+ if (UNLIKELY(static_cast<size_t>(oat_file.End() - *oat) < sizeof(T))) {
+ return false;
+ }
+ static_assert(std::is_trivial<T>::value, "T must be a trivial type");
+ typedef __attribute__((__aligned__(1))) T unaligned_type;
+ *value = *reinterpret_cast<const unaligned_type*>(*oat);
+ *oat += sizeof(T);
+ return true;
+}
+
bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
- *error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
+ *error_msg = StringPrintf("Invalid oat header for '%s': %s",
+ GetLocation().c_str(),
cause.c_str());
return false;
}
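The ReadOatDexFileData helper introduced above centralizes what used to be open-coded pointer reads: a bounds check against End() before every load, and an alignment-safe load via an aligned(1) typedef. A minimal standalone sketch of the same technique over a raw byte range (names are hypothetical):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Sketch only: bounds-checked, alignment-safe read of a trivially
// copyable T from [*cursor, end), advancing the cursor on success.
template <typename T>
bool ReadUnaligned(const uint8_t** cursor, const uint8_t* end, T* value) {
  static_assert(std::is_trivial<T>::value, "T must be a trivial type");
  if (static_cast<size_t>(end - *cursor) < sizeof(T)) {
    return false;  // Truncated input: the caller reports the error.
  }
  // memcpy is the portable spelling of the aligned(1) load used above.
  std::memcpy(value, *cursor, sizeof(T));
  *cursor += sizeof(T);
  return true;
}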
@@ -426,33 +446,42 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
oat += GetOatHeader().GetKeyValueStoreSize();
if (oat > End()) {
*error_msg = StringPrintf("In oat file '%s' found truncated variable-size data: "
- "%p + %zd + %ud <= %p", GetLocation().c_str(),
- Begin(), sizeof(OatHeader), GetOatHeader().GetKeyValueStoreSize(),
+ "%p + %zu + %u <= %p",
+ GetLocation().c_str(),
+ Begin(),
+ sizeof(OatHeader),
+ GetOatHeader().GetKeyValueStoreSize(),
End());
return false;
}
+ size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
- uint32_t dex_file_location_size = *reinterpret_cast<const uint32_t*>(oat);
- if (UNLIKELY(dex_file_location_size == 0U)) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with empty location name",
- GetLocation().c_str(), i);
+ uint32_t dex_file_location_size;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_location_size))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu truncated after dex file "
+ "location size",
+ GetLocation().c_str(),
+ i);
return false;
}
- oat += sizeof(dex_file_location_size);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd truncated after dex file "
- "location size", GetLocation().c_str(), i);
+ if (UNLIKELY(dex_file_location_size == 0U)) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu with empty location name",
+ GetLocation().c_str(),
+ i);
return false;
}
const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
oat += dex_file_location_size;
if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with truncated dex file "
- "location", GetLocation().c_str(), i);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu with truncated dex file "
+ "location",
+ GetLocation().c_str(),
+ i);
return false;
}
@@ -460,46 +489,61 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
abs_dex_location,
std::string(dex_file_location_data, dex_file_location_size));
- uint32_t dex_file_checksum = *reinterpret_cast<const uint32_t*>(oat);
- oat += sizeof(dex_file_checksum);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated after "
- "dex file checksum", GetLocation().c_str(), i,
+ uint32_t dex_file_checksum;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' truncated after "
+ "dex file checksum",
+ GetLocation().c_str(),
+ i,
dex_file_location.c_str());
return false;
}
- uint32_t dex_file_offset = *reinterpret_cast<const uint32_t*>(oat);
- if (UNLIKELY(dex_file_offset == 0U)) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with zero dex "
- "file offset", GetLocation().c_str(), i, dex_file_location.c_str());
+ uint32_t dex_file_offset;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_offset))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' truncated "
+ "after dex file offsets",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str());
return false;
}
- if (UNLIKELY(dex_file_offset > Size())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with dex file "
- "offset %ud > %zd", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_offset, Size());
+ if (UNLIKELY(dex_file_offset == 0U)) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with zero dex "
+ "file offset",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str());
return false;
}
- oat += sizeof(dex_file_offset);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
- "after dex file offsets", GetLocation().c_str(), i,
- dex_file_location.c_str());
+ if (UNLIKELY(dex_file_offset > Size())) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
+ "offset %u > %zu",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_offset,
+ Size());
return false;
}
const uint8_t* dex_file_pointer = Begin() + dex_file_offset;
if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- "dex file magic '%s'", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_pointer);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
+ "dex file magic '%s'",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_pointer);
return false;
}
if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- "dex file version '%s'", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_pointer);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
+ "dex file version '%s'",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_pointer);
return false;
}
const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
@@ -507,12 +551,33 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
- "method offsets", GetLocation().c_str(), i,
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with truncated "
+ "method offsets",
+ GetLocation().c_str(),
+ i,
dex_file_location.c_str());
return false;
}
+ uint8_t* current_dex_cache_arrays = nullptr;
+ if (dex_cache_arrays != nullptr) {
+ DexCacheArraysLayout layout(pointer_size, *header);
+ if (layout.Size() != 0u) {
+ if (static_cast<size_t>(bss_end_ - dex_cache_arrays) < layout.Size()) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with "
+ "truncated dex cache arrays, %zu < %zu.",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays),
+ layout.Size());
+ return false;
+ }
+ current_dex_cache_arrays = dex_cache_arrays;
+ dex_cache_arrays += layout.Size();
+ }
+ }
+
std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
// Create the OatDexFile and add it to the owning container.
@@ -521,7 +586,8 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
canonical_location,
dex_file_checksum,
dex_file_pointer,
- methods_offsets_pointer);
+ methods_offsets_pointer,
+ current_dex_cache_arrays);
oat_dex_files_storage_.push_back(oat_dex_file);
// Add the location and canonical location (if different) to the oat_dex_files_ table.
@@ -532,6 +598,15 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
oat_dex_files_.Put(canonical_key, oat_dex_file);
}
}
+
+ if (dex_cache_arrays != bss_end_) {
+ // We expect the bss section to be either empty (dex_cache_arrays and bss_end_
+ // both null) or contain just the dex cache arrays and nothing else.
+ *error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zu bytes.",
+ GetLocation().c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays));
+ return false;
+ }
return true;
}
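The dex cache arrays logic above carves the .bss section sequentially: each dex file claims layout.Size() bytes, and Setup() fails both when a slice would overrun bss_end_ and when bytes are left over at the end. A minimal sketch of that carving discipline, with the per-item sizes supplied by the caller (hypothetical helper):

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: hand out consecutive slices of [begin, end) and insist the
// region is consumed exactly, mirroring the dex-cache-arrays checks.
bool CarveRegion(uint8_t* begin, uint8_t* end,
                 const std::vector<size_t>& sizes,
                 std::vector<uint8_t*>* slices) {
  uint8_t* cursor = begin;
  for (size_t size : sizes) {
    if (size == 0u) {
      slices->push_back(nullptr);  // This item needs no slice.
      continue;
    }
    if (static_cast<size_t>(end - cursor) < size) {
      return false;  // Truncated region.
    }
    slices->push_back(cursor);
    cursor += size;
  }
  return cursor == end;  // Leftover bytes are also an error.
}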
@@ -634,13 +709,15 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer)
+ const uint32_t* oat_class_offsets_pointer,
+ uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
canonical_dex_file_location_(canonical_dex_file_location),
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
- oat_class_offsets_pointer_(oat_class_offsets_pointer) {}
+ oat_class_offsets_pointer_(oat_class_offsets_pointer),
+ dex_cache_arrays_(dex_cache_arrays) {}
OatFile::OatDexFile::~OatDexFile() {}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 364b7342d7..6acdf86208 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -301,10 +301,10 @@ class OatFile FINAL {
const uint8_t* end_;
// Pointer to the .bss section, if present, otherwise null.
- const uint8_t* bss_begin_;
+ uint8_t* bss_begin_;
// Pointer to the end of the .bss section, if present, otherwise null.
- const uint8_t* bss_end_;
+ uint8_t* bss_end_;
// Was this oat_file loaded executable?
const bool is_executable_;
@@ -396,6 +396,10 @@ class OatDexFile FINAL {
// Returns the offset to the OatClass information. Most callers should use GetOatClass.
uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+ uint8_t* GetDexCacheArrays() const {
+ return dex_cache_arrays_;
+ }
+
~OatDexFile();
private:
@@ -404,7 +408,8 @@ class OatDexFile FINAL {
const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer);
+ const uint32_t* oat_class_offsets_pointer,
+ uint8_t* dex_cache_arrays);
const OatFile* const oat_file_;
const std::string dex_file_location_;
@@ -412,6 +417,7 @@ class OatDexFile FINAL {
const uint32_t dex_file_location_checksum_;
const uint8_t* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
+ uint8_t* const dex_cache_arrays_;
friend class OatFile;
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 29b879ee80..8d5418d07d 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -65,8 +65,10 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
bool load_executable,
const char* package_name)
- : dex_location_(dex_location), isa_(isa),
- package_name_(package_name), load_executable_(load_executable) {
+ : isa_(isa), package_name_(package_name), load_executable_(load_executable) {
+ CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
+ dex_location_.assign(dex_location);
+
if (load_executable_ && isa != kRuntimeISA) {
LOG(WARNING) << "OatFileAssistant: Load executable specified, "
<< "but isa is not kRuntimeISA. Will not attempt to load executable.";
@@ -110,7 +112,7 @@ bool OatFileAssistant::IsInBootClassPath() {
ClassLinker* class_linker = runtime->GetClassLinker();
const auto& boot_class_path = class_linker->GetBootClassPath();
for (size_t i = 0; i < boot_class_path.size(); i++) {
- if (boot_class_path[i]->GetLocation() == std::string(dex_location_)) {
+ if (boot_class_path[i]->GetLocation() == dex_location_) {
VLOG(oat) << "Dex location " << dex_location_ << " is in boot class path";
return true;
}
@@ -266,7 +268,6 @@ bool OatFileAssistant::HasOriginalDexFiles() {
const std::string* OatFileAssistant::OdexFileName() {
if (!cached_odex_file_name_attempted_) {
- CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
cached_odex_file_name_attempted_ = true;
std::string error_msg;
@@ -330,15 +331,13 @@ const std::string* OatFileAssistant::OatFileName() {
cached_oat_file_name_attempted_ = true;
// Compute the oat file name from the dex location.
- CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
-
// TODO: The oat file assistant should be the definitive place for
// determining the oat file name from the dex location, not
// GetDalvikCacheFilename.
std::string cache_dir = StringPrintf("%s%s",
DalvikCacheDirectory().c_str(), GetInstructionSetString(isa_));
std::string error_msg;
- cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_,
+ cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_.c_str(),
cache_dir.c_str(), &cached_oat_file_name_, &error_msg);
if (!cached_oat_file_name_found_) {
// If we can't determine the oat file name, we treat the oat file as
@@ -413,7 +412,7 @@ bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
// what we provide, which verifies the primary dex checksum for us.
const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
- dex_location_, dex_checksum_pointer, false);
+ dex_location_.c_str(), dex_checksum_pointer, false);
if (oat_dex_file == nullptr) {
return true;
}
@@ -421,7 +420,7 @@ bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
// Verify the dex checksums for any secondary multidex files
for (size_t i = 1; ; i++) {
std::string secondary_dex_location
- = DexFile::GetMultiDexLocation(i, dex_location_);
+ = DexFile::GetMultiDexLocation(i, dex_location_.c_str());
const OatFile::OatDexFile* secondary_oat_dex_file
= file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
if (secondary_oat_dex_file == nullptr) {
@@ -613,16 +612,14 @@ bool OatFileAssistant::RelocateOatFile(const std::string* input_file,
CHECK(error_msg != nullptr);
if (input_file == nullptr) {
- *error_msg = "Patching of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the input file name could not be determined.";
return false;
}
const std::string& input_file_name = *input_file;
if (OatFileName() == nullptr) {
- *error_msg = "Patching of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return false;
}
@@ -666,8 +663,7 @@ bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
CHECK(error_msg != nullptr);
if (OatFileName() == nullptr) {
- *error_msg = "Generation of oat file for dex location "
- + std::string(dex_location_)
+ *error_msg = "Generation of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
return false;
}
@@ -681,14 +677,14 @@ bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
}
std::vector<std::string> args;
- args.push_back("--dex-file=" + std::string(dex_location_));
+ args.push_back("--dex-file=" + dex_location_);
args.push_back("--oat-file=" + oat_file_name);
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
// TODO: Why does dex2oat behave that way?
- if (!OS::FileExists(dex_location_)) {
- *error_msg = "Dex location " + std::string(dex_location_) + " does not exists.";
+ if (!OS::FileExists(dex_location_.c_str())) {
+ *error_msg = "Dex location " + dex_location_ + " does not exists.";
return false;
}
@@ -839,8 +835,7 @@ const uint32_t* OatFileAssistant::GetRequiredDexChecksum() {
required_dex_checksum_attempted_ = true;
required_dex_checksum_found_ = false;
std::string error_msg;
- CHECK(dex_location_ != nullptr) << "OatFileAssistant provided no dex location";
- if (DexFile::GetChecksum(dex_location_, &cached_required_dex_checksum_, &error_msg)) {
+ if (DexFile::GetChecksum(dex_location_.c_str(), &cached_required_dex_checksum_, &error_msg)) {
required_dex_checksum_found_ = true;
has_original_dex_files_ = true;
} else {
@@ -853,7 +848,7 @@ const uint32_t* OatFileAssistant::GetRequiredDexChecksum() {
const OatFile* odex_file = GetOdexFile();
if (odex_file != nullptr) {
const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
- dex_location_, nullptr, false);
+ dex_location_.c_str(), nullptr, false);
if (odex_dex_file != nullptr) {
cached_required_dex_checksum_ = odex_dex_file->GetDexFileLocationChecksum();
required_dex_checksum_found_ = true;
@@ -873,7 +868,7 @@ const OatFile* OatFileAssistant::GetOdexFile() {
std::string error_msg;
cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
odex_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_.c_str(), &error_msg));
if (cached_odex_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
<< odex_file_name << ": " << error_msg;
@@ -904,7 +899,7 @@ const OatFile* OatFileAssistant::GetOatFile() {
std::string error_msg;
cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
oat_file_name.c_str(), nullptr, nullptr, load_executable_,
- dex_location_, &error_msg));
+ dex_location_.c_str(), &error_msg));
if (cached_oat_file_.get() == nullptr) {
VLOG(oat) << "OatFileAssistant test for existing oat file "
<< oat_file_name << ": " << error_msg;
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 664db987d8..f781532b14 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -369,9 +369,7 @@ class OatFileAssistant {
// remaining lifetime of the OatFileAssistant object.
ScopedFlock flock_;
- // In a properly constructed OatFileAssistant object, dex_location_ should
- // never be null.
- const char* dex_location_ = nullptr;
+ std::string dex_location_;
// In a properly constructed OatFileAssistant object, isa_ should be either
// the 32 or 64 bit variant for the current device.
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index cef8702937..2c81eddf39 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -954,9 +954,7 @@ class RaceGenerateTask : public Task {
loaded_oat_file_(nullptr)
{}
- void Run(Thread* self) {
- UNUSED(self);
-
+ void Run(Thread* self ATTRIBUTE_UNUSED) {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
std::vector<std::unique_ptr<const DexFile>> dex_files;
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 3371a3955e..9eee156bb0 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -22,7 +22,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
-#include "dex_file.h"
+#include "dex_file-inl.h"
#include "gc/space/image_space.h"
#include "oat_file_assistant.h"
#include "thread-inl.h"
@@ -30,7 +30,9 @@
namespace art {
// For b/21333911.
-static constexpr bool kDuplicateClassesCheck = false;
+// Only enabled for debug builds to prevent bit rot. There are too many performance regressions for
+// normal builds.
+static constexpr bool kDuplicateClassesCheck = kIsDebugBuild;
const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
@@ -115,9 +117,9 @@ class DexFileAndClassPair : ValueObject {
current_class_index_(current_class_index),
from_loaded_oat_(from_loaded_oat) {}
- DexFileAndClassPair(DexFileAndClassPair&& rhs) = default;
+ DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
- DexFileAndClassPair& operator=(DexFileAndClassPair&& rhs) = default;
+ DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) = default;
const char* GetCachedDescriptor() const {
return cached_descriptor_;
@@ -139,7 +141,7 @@ class DexFileAndClassPair : ValueObject {
void Next() {
++current_class_index_;
- cached_descriptor_ = nullptr;
+ cached_descriptor_ = GetClassDescriptor(dex_file_.get(), current_class_index_);
}
size_t GetCurrentClassIndex() const {
@@ -162,7 +164,7 @@ class DexFileAndClassPair : ValueObject {
}
const char* cached_descriptor_;
- std::unique_ptr<const DexFile> dex_file_;
+ std::shared_ptr<const DexFile> dex_file_;
size_t current_class_index_;
bool from_loaded_oat_; // We only need to compare mismatches between what we load now
// and what was loaded before. Any old duplicates must have been
@@ -215,8 +217,17 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Add dex files from already loaded oat files, but skip boot.
const OatFile* boot_oat = GetBootOatFile();
+ // The same OatFile can be loaded multiple times at different addresses. In this case, we don't
+ // need to check both against each other since they would have resolved the same way at compile
+ // time.
+ std::unordered_set<std::string> unique_locations;
for (const std::unique_ptr<const OatFile>& loaded_oat_file : oat_files_) {
- if (loaded_oat_file.get() != boot_oat) {
+ DCHECK_NE(loaded_oat_file.get(), oat_file);
+ const std::string& location = loaded_oat_file->GetLocation();
+ if (loaded_oat_file.get() != boot_oat &&
+ location != oat_file->GetLocation() &&
+ unique_locations.find(location) == unique_locations.end()) {
+ unique_locations.insert(location);
AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue);
}
}
@@ -232,12 +243,12 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Now drain the queue.
while (!queue.empty()) {
// Modifying the top element is only safe if we pop right after.
- DexFileAndClassPair compare_pop(std::move(const_cast<DexFileAndClassPair&>(queue.top())));
+ DexFileAndClassPair compare_pop(queue.top());
queue.pop();
// Compare against the following elements.
while (!queue.empty()) {
- DexFileAndClassPair top(std::move(const_cast<DexFileAndClassPair&>(queue.top())));
+ DexFileAndClassPair top(queue.top());
if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
// Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
@@ -249,7 +260,6 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
top.GetDexFile()->GetLocation().c_str());
return true;
}
- // Pop it.
queue.pop();
AddNext(&top, &queue);
} else {
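HasCollisions() merges the class descriptors of all dex files through a min-priority queue ordered by descriptor, so duplicates surface as equal adjacent pops; the new unique_locations set additionally skips re-checking the same oat location loaded at multiple addresses. A minimal sketch of the queue-based duplicate scan over sorted descriptor lists (types hypothetical, each list assumed non-empty):

#include <cstddef>
#include <queue>
#include <string>
#include <vector>

// Sketch: each source contributes a sorted list of descriptors; a
// min-heap interleaves them, so equal descriptors pop out adjacently.
struct Cursor {
  const std::vector<std::string>* list;
  size_t index;
  bool from_loaded;  // mirrors from_loaded_oat_
  const std::string& Current() const { return (*list)[index]; }
};

struct Greater {
  bool operator()(const Cursor& a, const Cursor& b) const {
    return a.Current() > b.Current();  // min-heap on descriptor.
  }
};

bool HasCrossSourceDuplicate(std::vector<Cursor> sources) {
  std::priority_queue<Cursor, std::vector<Cursor>, Greater> queue(
      Greater(), std::move(sources));
  while (!queue.empty()) {
    Cursor top = queue.top();
    queue.pop();
    if (!queue.empty() && queue.top().Current() == top.Current() &&
        queue.top().from_loaded != top.from_loaded) {
      return true;  // Same descriptor crossing old and new files.
    }
    if (++top.index < top.list->size()) {
      queue.push(top);  // Advance this source, like AddNext().
    }
  }
  return false;
}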
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
new file mode 100644
index 0000000000..9786c05a9d
--- /dev/null
+++ b/runtime/oat_quick_method_header.cc
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_quick_method_header.h"
+
+#include "art_method.h"
+#include "mapping_table.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+OatQuickMethodHeader::OatQuickMethodHeader(
+ uint32_t mapping_table_offset,
+ uint32_t vmap_table_offset,
+ uint32_t gc_map_offset,
+ uint32_t frame_size_in_bytes,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ uint32_t code_size)
+ : mapping_table_offset_(mapping_table_offset),
+ vmap_table_offset_(vmap_table_offset),
+ gc_map_offset_(gc_map_offset),
+ frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
+ code_size_(code_size) {}
+
+OatQuickMethodHeader::~OatQuickMethodHeader() {}
+
+uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
+ const uintptr_t pc,
+ bool abort_on_failure) const {
+ const void* entry_point = GetEntryPoint();
+ uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+ if (IsOptimized()) {
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ if (stack_map.IsValid()) {
+ return stack_map.GetDexPc(encoding);
+ }
+ } else {
+ MappingTable table(GetMappingTable());
+ // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+ // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+ if (table.TotalSize() == 0) {
+ DCHECK(method->IsNative());
+ return DexFile::kDexNoIndex;
+ }
+
+ // Assume the caller wants a pc-to-dex mapping so check here first.
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ // Now check dex-to-pc mappings.
+ typedef MappingTable::DexToPcIterator It2;
+ for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ }
+ if (abort_on_failure) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Failed to find Dex offset for PC offset "
+ << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << method->GetEntryPointFromQuickCompiledCode()
+ << ") in " << PrettyMethod(method);
+ }
+ return DexFile::kDexNoIndex;
+}
+
+uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
+ const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure) const {
+ const void* entry_point = GetEntryPoint();
+ if (IsOptimized()) {
+ // Optimized code does not have a mapping table. Search for the dex-to-pc
+ // mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // All stack maps are stored in the same CodeItem section, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ }
+ } else {
+ MappingTable table(GetMappingTable());
+ if (table.TotalSize() == 0) {
+ DCHECK_EQ(dex_pc, 0U);
+ return 0; // Special no mapping/pc == 0 case
+ }
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ }
+
+ if (abort_on_failure) {
+ ScopedObjectAccess soa(Thread::Current());
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(method);
+ }
+ return UINTPTR_MAX;
+}
+
+} // namespace art
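ToDexPc() and ToNativeQuickPc() above are the two directions of the same lookup: optimized code consults stack maps keyed by native pc offset, while quick code scans the legacy MappingTable in both of its sub-tables before giving up. A minimal sketch of the bidirectional lookup, with the mapping reduced to a flat pair list (hypothetical stand-in for MappingTable):

#include <cstdint>
#include <vector>

// Sketch: a flat pc <-> dex-pc table searched in either direction,
// standing in for the PcToDex and DexToPc iterations above.
struct MapEntry {
  uint32_t native_pc_offset;
  uint32_t dex_pc;
};

constexpr uint32_t kNoIndex = 0xffffffffu;  // like DexFile::kDexNoIndex

uint32_t ToDexPc(const std::vector<MapEntry>& table, uint32_t native_pc_offset) {
  for (const MapEntry& e : table) {
    if (e.native_pc_offset == native_pc_offset) {
      return e.dex_pc;
    }
  }
  return kNoIndex;  // The caller decides whether this is fatal.
}

uint32_t ToNativePcOffset(const std::vector<MapEntry>& table, uint32_t dex_pc) {
  for (const MapEntry& e : table) {
    if (e.dex_pc == dex_pc) {
      return e.native_pc_offset;
    }
  }
  return kNoIndex;
}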
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
new file mode 100644
index 0000000000..6eadd87d38
--- /dev/null
+++ b/runtime/oat_quick_method_header.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
+#define ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
+
+#include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
+
+namespace art {
+
+class ArtMethod;
+
+// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
+class PACKED(4) OatQuickMethodHeader {
+ public:
+ OatQuickMethodHeader(uint32_t mapping_table_offset = 0U,
+ uint32_t vmap_table_offset = 0U,
+ uint32_t gc_map_offset = 0U,
+ uint32_t frame_size_in_bytes = 0U,
+ uint32_t core_spill_mask = 0U,
+ uint32_t fp_spill_mask = 0U,
+ uint32_t code_size = 0U);
+
+ ~OatQuickMethodHeader();
+
+ OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
+
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
+ return pc - reinterpret_cast<uintptr_t>(GetEntryPoint());
+ }
+
+ bool IsOptimized() const {
+ return gc_map_offset_ == 0 && vmap_table_offset_ != 0;
+ }
+
+ CodeInfo GetOptimizedCodeInfo() const {
+ DCHECK(IsOptimized());
+ const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_);
+ return CodeInfo(data);
+ }
+
+ const uint8_t* GetCode() const {
+ return code_;
+ }
+
+ const uint8_t* GetNativeGcMap() const {
+ return (gc_map_offset_ == 0) ? nullptr : code_ - gc_map_offset_;
+ }
+
+ const uint8_t* GetMappingTable() const {
+ return (mapping_table_offset_ == 0) ? nullptr : code_ - mapping_table_offset_;
+ }
+
+ const uint8_t* GetVmapTable() const {
+ CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
+ return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
+ }
+
+ bool Contains(uintptr_t pc) const {
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+ return code_start <= pc && pc <= (code_start + code_size_);
+ }
+
+ const uint8_t* GetEntryPoint() const {
+ // When the runtime architecture is ARM, `kRuntimeISA` is set to `kArm`
+ // (not `kThumb2`), *but* we always generate code for the Thumb-2
+ // instruction set anyway. Thumb-2 requires the low bit of the
+ // entry point to be set to 1.
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ return (kRuntimeISA == kArm)
+ ? reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(code_) | 1)
+ : code_;
+ }
+
+ template <bool kCheckFrameSize = true>
+ uint32_t GetFrameSizeInBytes() {
+ uint32_t result = frame_info_.FrameSizeInBytes();
+ if (kCheckFrameSize) {
+ DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ }
+ return result;
+ }
+
+ QuickMethodFrameInfo GetFrameInfo() const {
+ return frame_info_;
+ }
+
+ uintptr_t ToNativeQuickPc(ArtMethod* method,
+ const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure = true) const;
+
+ uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+
+ // The offset in bytes from the start of the mapping table to the end of the header.
+ uint32_t mapping_table_offset_;
+ // The offset in bytes from the start of the vmap table to the end of the header.
+ uint32_t vmap_table_offset_;
+ // The offset in bytes from the start of the gc map to the end of the header.
+ uint32_t gc_map_offset_;
+ // The stack frame information.
+ QuickMethodFrameInfo frame_info_;
+ // The code size in bytes.
+ uint32_t code_size_;
+ // The actual code.
+ uint8_t code_[0];
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
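The key layout idea in the header above is the trailing flexible array: code_[0] marks where the compiled code begins, so the metadata tables are addressed backwards from it (code_ - offset) and Contains() tests a pc against [code_, code_ + code_size_]. A minimal sketch of addressing data placed immediately before a payload; the layout here is assumed for illustration, not the real OatQuickMethodHeader:

#include <cstdint>

// Sketch: a packed header that sits directly in front of its payload.
// Offsets are measured backwards from the payload start, as in
// OatQuickMethodHeader::GetMappingTable() and friends. The zero-length
// array mirrors code_[0] above (a GNU extension).
struct PayloadHeader {
  uint32_t table_offset;  // 0 means "no table".
  uint32_t payload_size;
  uint8_t payload[0];     // Payload begins at the end of the header.

  const uint8_t* TableStart() const {
    return table_offset == 0u ? nullptr : payload - table_offset;
  }

  bool Contains(uintptr_t pc) const {
    uintptr_t start = reinterpret_cast<uintptr_t>(payload);
    return start <= pc && pc <= start + payload_size;
  }
};

// Recovering the header from a payload pointer inverts the layout:
inline const PayloadHeader* HeaderOf(const uint8_t* payload_start) {
  return reinterpret_cast<const PayloadHeader*>(
      payload_start - sizeof(PayloadHeader));
}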
diff --git a/runtime/quick/quick_method_frame_info.h b/runtime/quick/quick_method_frame_info.h
index 684d4da30e..71f8265bef 100644
--- a/runtime/quick/quick_method_frame_info.h
+++ b/runtime/quick/quick_method_frame_info.h
@@ -50,6 +50,10 @@ class PACKED(4) QuickMethodFrameInfo {
return fp_spill_mask_;
}
+ size_t GetReturnPcOffset() const {
+ return FrameSizeInBytes() - sizeof(void*);
+ }
+
private:
uint32_t frame_size_in_bytes_;
uint32_t core_spill_mask_;
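GetReturnPcOffset() above encodes the convention that the return pc is spilled in the topmost slot of a managed frame, so it sits one pointer below the frame's full size. A tiny worked example under that assumption:

#include <cstddef>

// Sketch: the return pc occupies the topmost pointer-sized slot, so
// its offset from the stack pointer is frame size minus one pointer.
constexpr size_t ReturnPcOffset(size_t frame_size_in_bytes, size_t pointer_size) {
  return frame_size_in_bytes - pointer_size;
}
static_assert(ReturnPcOffset(64, 8) == 56, "64-byte frame on a 64-bit target");
static_assert(ReturnPcOffset(32, 4) == 28, "32-byte frame on a 32-bit target");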
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 7ba19ab8d6..53b4f3a3b5 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,7 +17,6 @@
#include "quick_exception_handler.h"
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
@@ -27,6 +26,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
+#include "oat_quick_method_header.h"
#include "stack_map.h"
#include "verifier/method_verifier.h"
@@ -36,13 +36,19 @@ static constexpr bool kDebugExceptionDelivery = false;
static constexpr size_t kInvalidFrameDepth = 0xffffffff;
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
- : self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
- method_tracing_active_(is_deoptimization ||
- Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
- handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_quick_arg0_(0),
- handler_method_(nullptr), handler_dex_pc_(0), clear_exception_(false),
- handler_frame_depth_(kInvalidFrameDepth) {
-}
+ : self_(self),
+ context_(self->GetLongJumpContext()),
+ is_deoptimization_(is_deoptimization),
+ method_tracing_active_(is_deoptimization ||
+ Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
+ handler_quick_frame_(nullptr),
+ handler_quick_frame_pc_(0),
+ handler_method_header_(nullptr),
+ handler_quick_arg0_(0),
+ handler_method_(nullptr),
+ handler_dex_pc_(0),
+ clear_exception_(false),
+ handler_frame_depth_(kInvalidFrameDepth) {}
// Finds catch handler.
class CatchBlockStackVisitor FINAL : public StackVisitor {
@@ -62,6 +68,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
uint32_t next_dex_pc;
ArtMethod* next_art_method;
bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
@@ -101,8 +108,10 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
- GetCurrentCode().ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
+ GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
+ method, found_dex_pc, /* is_catch_handler */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
return false; // End stack walk.
} else if (UNLIKELY(GetThread()->HasDebuggerShadowFrames())) {
// We are going to unwind this frame. Did we prepare a shadow frame for debugging?
@@ -160,8 +169,8 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
}
// If the handler is in optimized code, we need to set the catch environment.
if (*handler_quick_frame_ != nullptr &&
- handler_method_ != nullptr &&
- ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*))) {
+ handler_method_header_ != nullptr &&
+ handler_method_header_->IsOptimized()) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
}
@@ -202,14 +211,14 @@ static VRegKind ToVRegKind(DexRegisterLocation::Kind kind) {
void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
DCHECK(!is_deoptimization_);
DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
- DCHECK(handler_method_ != nullptr && ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*)));
+ DCHECK(handler_method_ != nullptr && handler_method_header_->IsOptimized());
if (kDebugExceptionDelivery) {
self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
}
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
- CodeInfo code_info = ArtCode(handler_quick_frame_).GetOptimizedCodeInfo();
+ CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
// Find stack map of the throwing instruction.
@@ -285,6 +294,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
// and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
if (!stacked_shadow_frame_pushed_) {
// In case there is no deoptimized shadow frame for this upcall, we still
// need to push a nullptr to the stack since there is always a matching pop after
@@ -305,7 +315,43 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
CHECK_EQ(GetFrameDepth(), 1U);
return true;
} else {
- HandleDeoptimization(method);
+ // Check if a shadow frame already exists for debugger's set-local-value purpose.
+ const size_t frame_id = GetFrameId();
+ ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
+ const bool* updated_vregs;
+ const size_t num_regs = method->GetCodeItem()->registers_size_;
+ if (new_frame == nullptr) {
+ new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, method, GetDexPc());
+ updated_vregs = nullptr;
+ } else {
+ updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
+ DCHECK(updated_vregs != nullptr);
+ }
+ if (GetCurrentOatQuickMethodHeader()->IsOptimized()) {
+ HandleOptimizingDeoptimization(method, new_frame, updated_vregs);
+ } else {
+ HandleQuickDeoptimization(method, new_frame, updated_vregs);
+ }
+ if (updated_vregs != nullptr) {
+ // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
+ // array so this must come after we processed the frame.
+ GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
+ DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
+ }
+ if (prev_shadow_frame_ != nullptr) {
+ prev_shadow_frame_->SetLink(new_frame);
+ } else {
+ // Will be popped after the long jump after DeoptimizeStack(),
+ // right before interpreter::EnterInterpreterFromDeoptimize().
+ stacked_shadow_frame_pushed_ = true;
+ GetThread()->PushStackedShadowFrame(
+ new_frame,
+ single_frame_deopt_
+ ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
+ : StackedShadowFrameType::kDeoptimizationShadowFrame);
+ }
+ prev_shadow_frame_ = new_frame;
+
if (single_frame_deopt_ && !IsInInlinedFrame()) {
// Single-frame deopt ends at the first non-inlined frame and needs to store that method.
exception_handler_->SetHandlerQuickArg0(reinterpret_cast<uintptr_t>(method));
@@ -316,16 +362,103 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
private:
+ void HandleOptimizingDeoptimization(ArtMethod* m,
+ ShadowFrame* new_frame,
+ const bool* updated_vregs)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
+ DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+
+ for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+ if (updated_vregs != nullptr && updated_vregs[vreg]) {
+ // Keep the value set by debugger.
+ continue;
+ }
+
+ DexRegisterLocation::Kind location =
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ static constexpr uint32_t kDeadValue = 0xEBADDE09;
+ uint32_t value = kDeadValue;
+ bool is_reference = false;
+
+ switch (location) {
+ case DexRegisterLocation::Kind::kInStack: {
+ const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
+ number_of_vregs,
+ code_info,
+ encoding);
+ const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
+ value = *reinterpret_cast<const uint32_t*>(addr);
+ uint32_t bit = (offset >> 2);
+ if (stack_mask.size_in_bits() > bit && stack_mask.LoadBit(bit)) {
+ is_reference = true;
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInRegisterHigh:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
+ uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+ bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
+ CHECK(result);
+ if (location == DexRegisterLocation::Kind::kInRegister) {
+ if (((1u << reg) & register_mask) != 0) {
+ is_reference = true;
+ }
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kConstant: {
+ value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+ if (value == 0) {
+ // Make it a reference for extra safety.
+ is_reference = true;
+ }
+ break;
+ }
+ case DexRegisterLocation::Kind::kNone: {
+ break;
+ }
+ default: {
+ LOG(FATAL)
+ << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(
+ vreg_map.GetLocationInternalKind(vreg,
+ number_of_vregs,
+ code_info,
+ encoding));
+ UNREACHABLE();
+ }
+ }
+ if (is_reference) {
+ new_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(value));
+ } else {
+ new_frame->SetVReg(vreg, value);
+ }
+ }
+ }
+
static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
return static_cast<VRegKind>(kinds.at(reg * 2));
}
- void HandleDeoptimization(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void HandleQuickDeoptimization(ArtMethod* m,
+ ShadowFrame* new_frame,
+ const bool* updated_vregs)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr) << "No code item for " << PrettyMethod(m);
uint16_t num_regs = code_item->registers_size_;
uint32_t dex_pc = GetDexPc();
- StackHandleScope<2> hs(GetThread()); // Dex cache, class loader and method.
+ StackHandleScope<2> hs(GetThread()); // Dex cache and class loader.
mirror::Class* declaring_class = m->GetDeclaringClass();
Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
@@ -335,17 +468,6 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
true, true);
bool verifier_success = verifier.Verify();
CHECK(verifier_success) << PrettyMethod(m);
- // Check if a shadow frame already exists for debugger's set-local-value purpose.
- const size_t frame_id = GetFrameId();
- ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
- const bool* updated_vregs;
- if (new_frame == nullptr) {
- new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, m, dex_pc);
- updated_vregs = nullptr;
- } else {
- updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
- DCHECK(updated_vregs != nullptr);
- }
{
ScopedStackedShadowFramePusher pusher(GetThread(), new_frame,
StackedShadowFrameType::kShadowFrameUnderConstruction);
@@ -452,25 +574,6 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
}
}
- if (updated_vregs != nullptr) {
- // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
- // array so this must come after we processed the frame.
- GetThread()->RemoveDebuggerShadowFrameMapping(frame_id);
- DCHECK(GetThread()->FindDebuggerShadowFrame(frame_id) == nullptr);
- }
- if (prev_shadow_frame_ != nullptr) {
- prev_shadow_frame_->SetLink(new_frame);
- } else {
- // Will be popped after the long jump after DeoptimizeStack(),
- // right before interpreter::EnterInterpreterFromDeoptimize().
- stacked_shadow_frame_pushed_ = true;
- GetThread()->PushStackedShadowFrame(
- new_frame,
- single_frame_deopt_
- ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
- : StackedShadowFrameType::kDeoptimizationShadowFrame);
- }
- prev_shadow_frame_ = new_frame;
}
QuickExceptionHandler* const exception_handler_;
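HandleOptimizingDeoptimization() above walks every dex register, asks the stack map where its value lives (stack slot, machine register, or constant), and copies that value into the interpreter ShadowFrame, consulting the stack mask and register mask to decide whether a slot holds a reference. A minimal sketch of the dispatch, with the location kinds reduced to three and the types hypothetical:

#include <cstdint>
#include <vector>

// Sketch: rebuild interpreter registers from compiled-frame locations.
enum class Loc { kNone, kInStack, kConstant };

struct VRegInfo {
  Loc kind;
  int32_t stack_offset;  // valid for kInStack
  uint32_t constant;     // valid for kConstant
};

void FillShadowRegs(const uint8_t* quick_frame,
                    const std::vector<VRegInfo>& map,
                    std::vector<uint32_t>* shadow_regs) {
  constexpr uint32_t kDeadValue = 0xEBADDE09;  // same poison value as above
  for (const VRegInfo& info : map) {
    uint32_t value = kDeadValue;
    switch (info.kind) {
      case Loc::kInStack:
        // The compiled frame keeps the vreg in a stack slot
        // (slots assumed 4-byte aligned, as stack slots are).
        value = *reinterpret_cast<const uint32_t*>(quick_frame + info.stack_offset);
        break;
      case Loc::kConstant:
        value = info.constant;  // Value was never materialized.
        break;
      case Loc::kNone:
        break;  // Dead register: keep the poison value.
    }
    shadow_regs->push_back(value);
  }
}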
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 89d6a25128..eedf83f6df 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -71,6 +71,10 @@ class QuickExceptionHandler {
handler_quick_frame_pc_ = handler_quick_frame_pc;
}
+ void SetHandlerMethodHeader(const OatQuickMethodHeader* handler_method_header) {
+ handler_method_header_ = handler_method_header;
+ }
+
void SetHandlerQuickArg0(uintptr_t handler_quick_arg0) {
handler_quick_arg0_ = handler_quick_arg0;
}
@@ -115,6 +119,8 @@ class QuickExceptionHandler {
ArtMethod** handler_quick_frame_;
// PC to branch to for the handler.
uintptr_t handler_quick_frame_pc_;
+ // Quick code of the handler.
+ const OatQuickMethodHeader* handler_method_header_;
// The value for argument 0.
uintptr_t handler_quick_arg0_;
// The handler method to report to the debugger.
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index bd89be5d17..c7c270946b 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -157,7 +157,8 @@ class ReflectionTest : public CommonCompilerTest {
result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MAX, result.GetB());
- args[0].b = (SCHAR_MIN << 24) >> 24;
+ static_assert(SCHAR_MIN == -128, "SCHAR_MIN unexpected");
+ args[0].b = SCHAR_MIN;
result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args);
EXPECT_EQ(SCHAR_MIN, result.GetB());
}
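The old expression relied on (x << 24) >> 24 to sign-extend a byte, but left-shifting a negative int is undefined behavior in C++, which is why the test now asserts SCHAR_MIN's value and uses it directly. If a byte did need sign-extending, a fully defined way is the xor-and-subtract trick (a sketch):

#include <cstdint>

// Sketch: sign-extend the low 8 bits of v with no UB and no
// implementation-defined conversions (xor-and-subtract trick).
constexpr int32_t SignExtendByte(uint32_t v) {
  return static_cast<int32_t>((v & 0xffu) ^ 0x80u) - 0x80;
}
static_assert(SignExtendByte(0x80u) == -128, "0x80 sign-extends to SCHAR_MIN");
static_assert(SignExtendByte(0x7fu) == 127, "0x7f stays positive");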
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cd09bee4e6..6c459a3950 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -140,6 +140,12 @@ namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
static constexpr bool kEnableJavaStackTraceHandler = false;
+// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
+// linking.
+static constexpr double kLowMemoryMinLoadFactor = 0.5;
+static constexpr double kLowMemoryMaxLoadFactor = 0.8;
+static constexpr double kNormalMinLoadFactor = 0.4;
+static constexpr double kNormalMaxLoadFactor = 0.7;
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -200,7 +206,9 @@ Runtime::Runtime()
no_sig_chain_(false),
is_native_bridge_loaded_(false),
zygote_max_failed_boots_(0),
- experimental_flags_(ExperimentalFlags::kNone) {
+ experimental_flags_(ExperimentalFlags::kNone),
+ oat_file_manager_(nullptr),
+ is_low_memory_mode_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
}
@@ -886,6 +894,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
+ is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
ATRACE_BEGIN("CreateHeap");
@@ -1804,4 +1813,12 @@ LinearAlloc* Runtime::CreateLinearAlloc() {
: new LinearAlloc(arena_pool_.get());
}
+double Runtime::GetHashTableMinLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
+}
+
+double Runtime::GetHashTableMaxLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
+}
+
} // namespace art
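The load-factor pair above bounds how full the runtime's hash tables may get before resizing: a higher maximum (0.8 vs 0.7) trades extra DescriptorEquals probes for less memory, which is the deal a low-RAM device wants. A small sketch of how such factors typically drive resize decisions (hypothetical policy type, not the real container):

#include <cstddef>

// Sketch: a table grows when it crosses max_load_factor and may shrink
// when it falls under min_load_factor; low-memory mode pushes both
// bounds higher so buckets stay fuller.
struct LoadPolicy {
  double min_load_factor;
  double max_load_factor;

  bool ShouldGrow(size_t size, size_t buckets) const {
    return static_cast<double>(size) > max_load_factor * buckets;
  }
  bool ShouldShrink(size_t size, size_t buckets) const {
    return static_cast<double>(size) < min_load_factor * buckets;
  }
};

constexpr LoadPolicy kLowMemory = {0.5, 0.8};
constexpr LoadPolicy kNormal = {0.4, 0.7};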
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 458f08a316..7b1fdb21c4 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -580,6 +580,9 @@ class Runtime {
return *oat_file_manager_;
}
+ double GetHashTableMinLoadFactor() const;
+ double GetHashTableMaxLoadFactor() const;
+
private:
static void InitPlatformSignalHandlers();
@@ -780,6 +783,9 @@ class Runtime {
// Oat file manager, keeps track of what oat files are open.
OatFileManager* oat_file_manager_;
+ // Whether or not we are on a low RAM device.
+ bool is_low_memory_mode_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 44a13c9020..f0b3c4e4cb 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@ struct Backtrace {
public:
explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), "\t", nullptr, nullptr, raw_context_);
+ DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
}
private:
// Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/stack.cc b/runtime/stack.cc
index d8d916c7ee..9359d27822 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,7 +17,6 @@
#include "stack.h"
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -25,10 +24,13 @@
#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "oat_quick_method_header.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
@@ -103,6 +105,7 @@ StackVisitor::StackVisitor(Thread* thread,
cur_shadow_frame_(nullptr),
cur_quick_frame_(nullptr),
cur_quick_frame_pc_(0),
+ cur_oat_quick_method_header_(nullptr),
num_frames_(num_frames),
cur_depth_(0),
current_inlining_depth_(0),
@@ -111,9 +114,9 @@ StackVisitor::StackVisitor(Thread* thread,
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- ArtCode outer_code = GetCurrentCode();
- uint32_t native_pc_offset = outer_code.NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = outer_code.GetOptimizedCodeInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -142,8 +145,11 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
+ } else if (cur_oat_quick_method_header_ == nullptr) {
+ return DexFile::kDexNoIndex;
} else {
- return GetCurrentCode().ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ return cur_oat_quick_method_header_->ToDexPc(
+ GetMethod(), cur_quick_frame_pc_, abort_on_failure);
}
} else {
return 0;
@@ -161,8 +167,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
HandleScope* hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- GetCurrentCode().GetHandleScopeOffset().SizeValue());
+ reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
@@ -192,7 +197,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
size_t StackVisitor::GetNativePcOffset() const {
DCHECK(!IsShadowFrame());
- return GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}
bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
@@ -201,10 +206,11 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
}
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header->IsOptimized()) {
return true; // TODO: Implement.
}
- const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = method_header->GetNativeGcMap();
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -213,7 +219,7 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
const uint8_t* reg_bitmap = nullptr;
if (num_regs > 0) {
- uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
}
@@ -252,7 +258,7 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -267,8 +273,9 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -294,10 +301,11 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, code_item->registers_size_);
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
@@ -402,7 +410,7 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -417,8 +425,9 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -477,7 +486,7 @@ bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -492,8 +501,9 @@ bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t ne
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -584,7 +594,7 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (cur_oat_quick_method_header_->IsOptimized()) {
return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -599,8 +609,9 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
bool StackVisitor::SetVRegPairFromQuickCode(
ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
DCHECK_EQ(m, GetMethod());
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -717,14 +728,14 @@ void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -821,6 +832,45 @@ static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(
return thread->GetInstrumentationStack()->at(depth);
}
+static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
+ return;
+ }
+
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ return;
+ }
+
+ const void* code = method->GetEntryPointFromQuickCompiledCode();
+ if (code == GetQuickInstrumentationEntryPoint()) {
+ return;
+ }
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickResolutionStub(code)) {
+ return;
+ }
+
+  // If the JIT is enabled, it may have compiled the method right after the
+  // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
+
+ uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
+ EntryPointToCodePointer(code))[-1].code_size_;
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
+ CHECK(code_start <= pc && pc <= (code_start + code_size))
+ << PrettyMethod(method)
+ << " pc=" << std::hex << pc
+ << " code=" << code
+ << " size=" << code_size;
+}
+
void StackVisitor::SanityCheckFrame() const {
if (kIsDebugBuild) {
ArtMethod* method = GetMethod();
@@ -859,9 +909,9 @@ void StackVisitor::SanityCheckFrame() const {
}
}
if (cur_quick_frame_ != nullptr) {
- GetCurrentCode().AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
+ AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
// Frame sanity.
- size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
@@ -871,13 +921,80 @@ void StackVisitor::SanityCheckFrame() const {
// TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
// const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
const size_t kMaxExpectedFrameSize = 2 * KB;
- CHECK_LE(frame_size, kMaxExpectedFrameSize);
- size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
+ CHECK_LE(frame_size, kMaxExpectedFrameSize) << PrettyMethod(method);
+ size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
CHECK_LT(return_pc_offset, frame_size);
}
}
}
+// Counts the number of references in the parameter list of the corresponding method.
+// Note: This does _not_ include "this" for non-static methods.
+static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t shorty_len;
+ const char* shorty = method->GetShorty(&shorty_len);
+ uint32_t refs = 0;
+  for (uint32_t i = 1; i < shorty_len; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ return refs;
+}
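
As an aside, the shorty scan above is self-contained enough to exercise outside the runtime; a minimal sketch assuming only the shorty format (index 0 is the return type, 'L' marks a reference), with CountReferenceArgs as a hypothetical stand-in rather than an ART API:

#include <cstdint>
#include <cstring>

static uint32_t CountReferenceArgs(const char* shorty) {
  uint32_t refs = 0;
  // Index 0 of a shorty is the return type, so parameters start at 1.
  for (size_t i = 1; i < strlen(shorty); ++i) {
    if (shorty[i] == 'L') {  // 'L' denotes a reference argument.
      ++refs;
    }
  }
  return refs;
}

For example, void m(Object, int, String) has shorty "VLIL", giving two reference arguments, not counting the implicit receiver.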
+
+QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
+ if (cur_oat_quick_method_header_ != nullptr) {
+ return cur_oat_quick_method_header_->GetFrameInfo();
+ }
+
+ ArtMethod* method = GetMethod();
+ Runtime* runtime = Runtime::Current();
+
+ if (method->IsAbstract()) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+
+ // This goes before IsProxyMethod since runtime methods have a null declaring class.
+ if (method->IsRuntimeMethod()) {
+ return runtime->GetRuntimeMethodFrameInfo(method);
+ }
+
+  // For proxy methods we add special handling for the direct method case (there is only one
+  // direct method - the constructor). The direct method is cloned from the original
+  // java.lang.reflect.Proxy class together with its code, and as a result it is executed as a
+  // usual quick compiled method without any stubs. So the frame info should be returned as for
+  // a quick method, not a stub. However, if instrumentation stubs are installed,
+  // instrumentation->GetQuickCodeFor() returns artQuickProxyInvokeHandler instead of an
+  // oat code pointer, so we have to add a special case here.
+ if (method->IsProxyMethod()) {
+ if (method->IsDirect()) {
+ CHECK(method->IsConstructor());
+ const void* code_pointer =
+ EntryPointToCodePointer(method->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+ } else {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ }
+
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ DCHECK(method->IsNative());
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
+ DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
+ // Generic JNI frame.
+ uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+ // Callee saves + handle scope + method ref + alignment
+ // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+ size_t frame_size = RoundUp(
+ callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
+ kStackAlignment);
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+}
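
To see the generic JNI frame arithmetic above in action, here is a minimal sketch with illustrative numbers (the 96-byte callee-save frame, 12-byte handle scope header, 4-byte references, and 16-byte stack alignment are assumptions for a 64-bit target, not ART's exact values):

#include <cstddef>
#include <cstdint>

static size_t RoundUpTo(size_t x, size_t align) {
  return (x + align - 1) & ~(align - 1);  // align must be a power of two
}

static size_t GenericJniFrameSize(size_t callee_save_frame_size,
                                  uint32_t handle_refs) {
  const size_t kPointerSize = 8;      // assumed 64-bit target
  const size_t kStackAlignment = 16;  // assumed alignment
  size_t scope_size = 12 + handle_refs * 4;  // illustrative HandleScope size
  // Callee saves + handle scope + method ref + alignment; the callee-save
  // frame already stores one method pointer, hence the subtraction.
  return RoundUpTo(
      callee_save_frame_size - kPointerSize + kPointerSize + scope_size,
      kStackAlignment);
}

With three handle-scope references, GenericJniFrameSize(96, 3) is RoundUpTo(96 - 8 + 8 + 24, 16) = 128 bytes.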
+
void StackVisitor::WalkStack(bool include_transitions) {
DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
CHECK_EQ(cur_depth_, 0U);
@@ -890,19 +1007,23 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
+ cur_oat_quick_method_header_ = nullptr;
if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
ArtMethod* method = *cur_quick_frame_;
while (method != nullptr) {
+ cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
SanityCheckFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
- && GetCurrentCode().IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ && (cur_oat_quick_method_header_ != nullptr)
+ && cur_oat_quick_method_header_->IsOptimized()) {
+ CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset =
+ cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
@@ -925,14 +1046,16 @@ void StackVisitor::WalkStack(bool include_transitions) {
return;
}
+ QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
if (context_ != nullptr) {
- context_->FillCalleeSaves(*this);
+ context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
}
- size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
// Compute PC for next stack frame from return PC.
- size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
+ size_t frame_size = frame_info.FrameSizeInBytes();
+ size_t return_pc_offset = frame_size - sizeof(void*);
uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
+
if (UNLIKELY(exit_stubs_installed)) {
// While profiling, the return pc is restored from the side stack, except when walking
// the stack for an exception where the side stack will be unwound in VisitFrame.
@@ -963,7 +1086,6 @@ void StackVisitor::WalkStack(bool include_transitions) {
return_pc = instrumentation_frame.return_pc_;
}
}
- ArtCode code = GetCurrentCode();
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
@@ -971,8 +1093,11 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (kDebugStackWalk) {
LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
- << " optimized=" << code.IsOptimized(sizeof(void*))
+ << std::boolalpha
+ << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
+ cur_oat_quick_method_header_->IsOptimized())
<< " native=" << method->IsNative()
+ << std::noboolalpha
<< " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
<< "," << method->GetEntryPointFromJni()
<< " next=" << *cur_quick_frame_;
diff --git a/runtime/stack.h b/runtime/stack.h
index 3e0566d2f0..1276b244e7 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,13 +20,13 @@
#include <stdint.h>
#include <string>
-#include "art_code.h"
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
+#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
#include "verify_object.h"
@@ -40,6 +40,7 @@ class ArtMethod;
class Context;
class HandleScope;
class InlineInfo;
+class OatQuickMethodHeader;
class ScopedObjectAccess;
class ShadowFrame;
class StackVisitor;
@@ -532,6 +533,9 @@ class StackVisitor {
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
public:
virtual ~StackVisitor() {}
@@ -561,18 +565,6 @@ class StackVisitor {
size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
- uintptr_t* CalleeSaveAddress(int num, size_t frame_size) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
- // Callee saves are held at the top of the frame
- DCHECK(GetMethod() != nullptr);
- uint8_t* save_addr =
- reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size - ((num + 1) * sizeof(void*));
-#if defined(__i386__) || defined(__x86_64__)
- save_addr -= sizeof(void*); // account for return address
-#endif
- return reinterpret_cast<uintptr_t*>(save_addr);
- }
-
// Returns the height of the stack in the managed stack frames, including transitions.
size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
@@ -735,7 +727,11 @@ class StackVisitor {
static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
- ArtCode GetCurrentCode() const { return ArtCode(cur_quick_frame_); }
+ const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
+ return cur_oat_quick_method_header_;
+ }
+
+ QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
@@ -774,8 +770,6 @@ class StackVisitor {
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
@@ -813,6 +807,7 @@ class StackVisitor {
ShadowFrame* cur_shadow_frame_;
ArtMethod** cur_quick_frame_;
uintptr_t cur_quick_frame_pc_;
+ const OatQuickMethodHeader* cur_oat_quick_method_header_;
// Lazily computed, number of frames in the stack.
size_t num_frames_;
// Depth of the frame we're currently at.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8e0c288185..114e0f6a9c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,7 +32,6 @@
#include <sstream>
#include "arch/context.h"
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
@@ -58,6 +57,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
+#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
@@ -1496,8 +1496,7 @@ void Thread::DumpStack(std::ostream& os) const {
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
- ArtCode art_code(method);
- DumpNativeStack(os, GetTid(), " native: ", method, &art_code);
+ DumpNativeStack(os, GetTid(), " native: ", method);
}
DumpJavaStack(os);
} else {
@@ -2640,38 +2639,15 @@ class ReferenceMapVisitor : public StackVisitor {
VisitDeclaringClass(m);
DCHECK(m != nullptr);
size_t num_regs = shadow_frame->NumberOfVRegs();
- if (m->IsNative() || shadow_frame->HasReferenceArray()) {
- // handle scope for JNI or References for interpreter.
- for (size_t reg = 0; reg < num_regs; ++reg) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
- }
- }
- }
- } else {
- // Java method.
- // Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
- CHECK(gc_map != nullptr) << PrettyMethod(m);
- verifier::DexPcToReferenceMap dex_gc_map(gc_map);
- uint32_t dex_pc = shadow_frame->GetDexPC();
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- DCHECK(reg_bitmap != nullptr);
- num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- if (TestBitmap(reg, reg_bitmap)) {
- mirror::Object* ref = shadow_frame->GetVRegReference(reg);
- if (ref != nullptr) {
- mirror::Object* new_ref = ref;
- visitor_(&new_ref, reg, this);
- if (new_ref != ref) {
- shadow_frame->SetVRegReference(reg, new_ref);
- }
- }
+ DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
+ // handle scope for JNI or References for interpreter.
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ mirror::Object* ref = shadow_frame->GetVRegReference(reg);
+ if (ref != nullptr) {
+ mirror::Object* new_ref = ref;
+ visitor_(&new_ref, reg, this);
+ if (new_ref != ref) {
+ shadow_frame->SetVRegReference(reg, new_ref);
}
}
}
@@ -2702,11 +2678,12 @@ class ReferenceMapVisitor : public StackVisitor {
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (GetCurrentCode().IsOptimized(sizeof(void*))) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header->IsOptimized()) {
auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
- uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
+ CodeInfo code_info = method_header->GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
@@ -2736,7 +2713,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
} else {
- const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = method_header->GetNativeGcMap();
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -2744,12 +2721,11 @@ class ReferenceMapVisitor : public StackVisitor {
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = map.RegWidth() * 8;
if (num_regs > 0) {
- uintptr_t native_pc_offset =
- GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
- const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
- QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
+ const VmapTable vmap_table(method_header->GetVmapTable());
+ QuickMethodFrameInfo frame_info = method_header->GetFrameInfo();
// For all dex registers in the bitmap
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 8cea10c844..8f3461acdf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -626,6 +626,24 @@ class Thread {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
}
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ rosalloc_runs));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_top));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_end));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 745aa6386e..ab342aa882 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -752,26 +752,31 @@ void Trace::FinishTracing() {
}
}
-void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc) {
- UNUSED(thread, this_object, method, new_dex_pc);
+void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t new_dex_pc) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
- const JValue& field_value)
+void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED,
+ const JValue& field_value ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -804,9 +809,9 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 40cd6d340c..62af380219 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,7 +25,6 @@
#include <unistd.h>
#include <memory>
-#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
@@ -37,6 +36,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
+#include "oat_quick_method_header.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "utf-inl.h"
@@ -1090,10 +1090,20 @@ static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream
map_src.c_str(), offset));
RunCommand(cmdline.c_str(), &os, prefix);
}
+
+static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFETY_ANALYSIS {
+ uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+ method->GetEntryPointFromQuickCompiledCode()));
+ if (code == 0) {
+ return pc == 0;
+ }
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return code <= pc && pc <= (code + code_size);
+}
#endif
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
- ArtMethod* current_method, ArtCode* current_code, void* ucontext_ptr) {
+ ArtMethod* current_method, void* ucontext_ptr) {
#if __linux__
// b/18119146
if (RUNNING_ON_MEMORY_TOOL != 0) {
@@ -1147,10 +1157,10 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
os << "+" << it->func_offset;
}
try_addr2line = true;
- } else if (
- current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_code->PcIsWithinQuickCode(it->pc)) {
- const void* start_of_code = current_code->GetQuickOatEntryPoint(sizeof(void*));
+ } else if (current_method != nullptr &&
+ Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+ PcIsWithinQuickCode(current_method, it->pc)) {
+ const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
} else {
@@ -1164,7 +1174,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
}
}
#else
- UNUSED(os, tid, prefix, current_method, current_code, ucontext_ptr);
+ UNUSED(os, tid, prefix, current_method, ucontext_ptr);
#endif
}
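
Both PcIsWithinQuickCode here and AssertPcIsWithinQuickCode in stack.cc rely on the OatQuickMethodHeader sitting immediately before the compiled code, so header fields are read by indexing the code pointer at [-1]; a minimal sketch of that layout trick (MethodHeader is an illustrative stand-in, not ART's real header):

#include <cstdint>

struct MethodHeader {
  uint32_t code_size_;  // size of the compiled code that follows the header
};

static bool PcInCode(uintptr_t code_start, uintptr_t pc) {
  // The header is laid out directly before the first instruction, so the
  // code pointer, viewed as a header array, yields the header at [-1].
  const MethodHeader* header =
      reinterpret_cast<const MethodHeader*>(code_start) - 1;
  return code_start <= pc && pc <= code_start + header->code_size_;
}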
diff --git a/runtime/utils.h b/runtime/utils.h
index b67f273f15..79502c7971 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -222,7 +222,7 @@ void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- ArtMethod* current_method = nullptr, ArtCode* current_code = nullptr, void* ucontext = nullptr)
+ ArtMethod* current_method = nullptr, void* ucontext = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
@@ -272,18 +272,15 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
class VoidFunctor {
public:
template <typename A>
- inline void operator() (A a) const {
- UNUSED(a);
+ inline void operator() (A a ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B>
- inline void operator() (A a, B b) const {
- UNUSED(a, b);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B, typename C>
- inline void operator() (A a, B b, C c) const {
- UNUSED(a, b, c);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED, C c ATTRIBUTE_UNUSED) const {
}
};
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 4f662d5a8f..90e24b9632 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -27,20 +27,25 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
+ const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
methods_offset_(types_offset_ +
- RoundUp(TypesSize(dex_file->NumTypeIds()), MethodsAlignment())),
+ RoundUp(TypesSize(header.type_ids_size_), MethodsAlignment())),
strings_offset_(methods_offset_ +
- RoundUp(MethodsSize(dex_file->NumMethodIds()), StringsAlignment())),
+ RoundUp(MethodsSize(header.method_ids_size_), StringsAlignment())),
fields_offset_(strings_offset_ +
- RoundUp(StringsSize(dex_file->NumStringIds()), FieldsAlignment())),
+ RoundUp(StringsSize(header.string_ids_size_), FieldsAlignment())),
size_(fields_offset_ +
- RoundUp(FieldsSize(dex_file->NumFieldIds()), Alignment())) {
+ RoundUp(FieldsSize(header.field_ids_size_), Alignment())) {
DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+ : DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
+}
+
inline size_t DexCacheArraysLayout::Alignment() const {
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
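
The member-initializer chain above accumulates offsets section by section, each rounded up to the next section's alignment; a minimal sketch of the same accumulation with a single alignment (Layout and its fields are illustrative):

#include <cstddef>

static size_t RoundUpTo(size_t x, size_t align) {
  return (x + align - 1) & ~(align - 1);
}

struct Layout {
  size_t methods_offset, strings_offset, fields_offset, size;
  Layout(size_t types_bytes, size_t methods_bytes,
         size_t strings_bytes, size_t fields_bytes, size_t align) {
    // types_offset is always 0; every later section starts where the
    // previous one ends, rounded up to the section alignment.
    methods_offset = RoundUpTo(0 + types_bytes, align);
    strings_offset = RoundUpTo(methods_offset + methods_bytes, align);
    fields_offset  = RoundUpTo(strings_offset + strings_bytes, align);
    size           = RoundUpTo(fields_offset + fields_bytes, align);
  }
};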
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index d50be5ac03..cd84460c3b 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
#define ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
+#include "dex_file.h"
+
namespace art {
/**
@@ -36,6 +38,9 @@ class DexCacheArraysLayout {
size_(0u) {
}
+ // Construct a layout for a particular dex file header.
+ DexCacheArraysLayout(size_t pointer_size, const DexFile::Header& header);
+
// Construct a layout for a particular dex file.
DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 33c90e3000..f48b1e1212 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -338,6 +338,8 @@ void RegisterLine::CheckLiteralOp(MethodVerifier* verifier, const Instruction* i
}
}
+static constexpr uint32_t kVirtualNullRegister = std::numeric_limits<uint32_t>::max();
+
void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) {
const RegType& reg_type = GetRegisterType(verifier, reg_idx);
if (!reg_type.IsReferenceTypes()) {
@@ -352,6 +354,12 @@ void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32
}
} else {
if (SetRegToLockDepth(reg_idx, monitors_.size())) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ if (reg_type.IsZero()) {
+ SetRegToLockDepth(kVirtualNullRegister, monitors_.size());
+ }
+
monitors_.push_back(insn_idx);
} else {
verifier->Fail(VERIFY_ERROR_LOCKING);
@@ -377,7 +385,19 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
}
} else {
monitors_.pop_back();
- if (!IsSetLockDepth(reg_idx, monitors_.size())) {
+
+ bool success = IsSetLockDepth(reg_idx, monitors_.size());
+
+ if (!success && reg_type.IsZero()) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ success = IsSetLockDepth(kVirtualNullRegister, monitors_.size());
+ if (success) {
+ reg_idx = kVirtualNullRegister;
+ }
+ }
+
+ if (!success) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "monitor-exit not unlocking the top of the monitor stack while verifying "
@@ -385,12 +405,41 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
*verifier->GetMethodReference().dex_file);
}
} else {
- // Record the register was unlocked
+      // Record that the register was unlocked. This clears all aliases, so it will also clear
+      // the null lock, if necessary.
ClearRegToLockDepth(reg_idx, monitors_.size());
}
}
}
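
The kVirtualNullRegister handling above can be pictured with a toy monitor tracker; a minimal sketch using std containers in place of the verifier's maps (MonitorState, Enter, and the bitmask encoding are illustrative):

#include <cstdint>
#include <limits>
#include <map>
#include <vector>

static constexpr uint32_t kNullAlias = std::numeric_limits<uint32_t>::max();

struct MonitorState {
  std::vector<uint32_t> monitors;              // one entry per lock depth
  std::map<uint32_t, uint32_t> reg_to_depths;  // register -> depth bitmask

  void Enter(uint32_t reg, bool reg_holds_null_literal, uint32_t dex_pc) {
    uint32_t depth = static_cast<uint32_t>(monitors.size());
    reg_to_depths[reg] |= 1u << depth;
    if (reg_holds_null_literal) {
      // Park the lock on the virtual register as well, so a monitor-exit
      // through a different null-holding vreg still finds a matching depth.
      reg_to_depths[kNullAlias] |= 1u << depth;
    }
    monitors.push_back(dex_pc);
  }
};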
+// Check whether there is another register in the search map that is locked the same way as the
+// register in the src map. This establishes an alias.
+static bool FindLockAliasedRegister(
+ uint32_t src,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+ auto it = src_map.find(src);
+ if (it == src_map.end()) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+ uint32_t src_lock_levels = it->second;
+ if (src_lock_levels == 0) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+
+ // Scan the map for the same value.
+ for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+ if (pair.first != src && pair.second == src_lock_levels) {
+ return true;
+ }
+ }
+
+ // Nothing found, no alias.
+ return false;
+}
+
bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
bool changed = false;
DCHECK(incoming_line != nullptr);
@@ -417,9 +466,29 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
size_t depths = reg_to_lock_depths_.count(idx);
size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
if (depths != incoming_depths) {
- if (depths == 0 || incoming_depths == 0) {
- reg_to_lock_depths_.erase(idx);
- } else {
+ // Stack levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+      // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // | |
+ // v0 = v1 {v0=1, v1=1} v0 = v2 {v1=1}
+ // | |
+ // {v1=1}
+ // // Dropping v0, as the status can't be merged
+      //                                    // but the lock info ("locked at depth 1" and
+      //                                    // "not locked at all") is available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "mismatched stack depths for register v" << idx
@@ -429,20 +498,51 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
}
break;
}
+      // We found aliases; erase the entry so the register counts as unlocked here.
+ reg_to_lock_depths_.erase(idx);
} else if (depths > 0) {
// Check whether they're actually the same levels.
uint32_t locked_levels = reg_to_lock_depths_.find(idx)->second;
uint32_t incoming_locked_levels = incoming_line->reg_to_lock_depths_.find(idx)->second;
if (locked_levels != incoming_locked_levels) {
- verifier->Fail(VERIFY_ERROR_LOCKING);
- if (kDumpLockFailures) {
- LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
- << std::hex << locked_levels << std::dec << " != "
- << std::hex << incoming_locked_levels << std::dec << " in "
- << PrettyMethod(verifier->GetMethodReference().dex_method_index,
- *verifier->GetMethodReference().dex_file);
+ // Lock levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+        // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // lock v2 {v1=1, v2=2}
+ // | |
+ // v0 = v1 {v0=1, v1=1, v2=2} v0 = v2 {v0=2, v1=1, v2=2}
+ // | |
+ // {v1=1, v2=2}
+ // // Dropping v0, as the status can't be
+ // // merged but the lock info ("locked at
+ // // depth 1" and "locked at depth 2") is
+ // // available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
+          // No alias found for either the current or the incoming lock levels; we'll lose
+          // information.
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
+ << std::hex << locked_levels << std::dec << " != "
+ << std::hex << incoming_locked_levels << std::dec << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
+ break;
}
- break;
+        // We found aliases; erase the entry so the register counts as unlocked here.
+ reg_to_lock_depths_.erase(idx);
}
}
}
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index c984b17c2c..b76555b00b 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -150,10 +150,15 @@ extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context)
// Do we have a managed handler? If so, run it first.
SpecialSignalHandlerFn managed = user_sigactions[sig].GetSpecialHandler();
if (managed != nullptr) {
+ sigset_t mask, old_mask;
+ sigfillset(&mask);
+ sigprocmask(SIG_BLOCK, &mask, &old_mask);
// Call the handler. If it succeeds, we're done.
if (managed(sig, info, context)) {
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
return;
}
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
}
const struct sigaction& action = user_sigactions[sig].GetAction();
@@ -166,7 +171,10 @@ extern "C" void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context)
}
} else {
if (action.sa_sigaction != nullptr) {
+ sigset_t old_mask;
+ sigprocmask(SIG_BLOCK, &action.sa_mask, &old_mask);
action.sa_sigaction(sig, info, context);
+ sigprocmask(SIG_SETMASK, &old_mask, nullptr);
} else {
signal(sig, SIG_DFL);
raise(sig);
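
The sigprocmask additions above follow one pattern: save the caller's mask, block signals while the chained handler runs, and restore the mask on every exit path; a minimal standalone sketch of that pattern (CallWithAllSignalsBlocked is a hypothetical helper):

#include <signal.h>

static void CallWithAllSignalsBlocked(void (*handler)(int), int sig) {
  sigset_t mask, old_mask;
  sigfillset(&mask);                         // request every blockable signal
  sigprocmask(SIG_BLOCK, &mask, &old_mask);  // block, remembering the old mask
  handler(sig);
  sigprocmask(SIG_SETMASK, &old_mask, nullptr);  // restore on the way out
}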
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index f8d321cbec..34fb3f8a01 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -19,15 +19,17 @@
namespace art {
-#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
- int t[] = {__VA_ARGS__}; \
- int t_size = sizeof(t) / sizeof(*t); \
- uintptr_t native_quick_pc = GetCurrentCode().ToNativeQuickPc(dex_pc, \
- /* is_catch_handler */ false, \
- abort_if_not_found); \
- if (native_quick_pc != UINTPTR_MAX) { \
- CheckReferences(t, t_size, GetCurrentCode().NativeQuickPcOffset(native_quick_pc)); \
- } \
+#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
+ int t[] = {__VA_ARGS__}; \
+ int t_size = sizeof(t) / sizeof(*t); \
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); \
+ uintptr_t native_quick_pc = method_header->ToNativeQuickPc(GetMethod(), \
+ dex_pc, \
+ /* is_catch_handler */ false, \
+ abort_if_not_found); \
+ if (native_quick_pc != UINTPTR_MAX) { \
+ CheckReferences(t, t_size, method_header->NativeQuickPcOffset(native_quick_pc)); \
+ } \
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
@@ -49,7 +51,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
@@ -65,7 +67,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
// v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
// v5 is removed from the root set because there is a "merge" operation.
@@ -74,7 +76,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
}
CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/079-phantom/src/Bitmap.java b/test/079-phantom/src/Bitmap.java
index 85eb3ccb97..ff43749e76 100644
--- a/test/079-phantom/src/Bitmap.java
+++ b/test/079-phantom/src/Bitmap.java
@@ -125,7 +125,6 @@ class PhantomWrapper extends PhantomReference {
*/
class BitmapWatcher extends Thread {
ReferenceQueue<PhantomWrapper> mQueue;
- volatile boolean mQuit = false;
BitmapWatcher(ReferenceQueue<PhantomWrapper> queue) {
mQueue = queue;
@@ -133,7 +132,7 @@ class BitmapWatcher extends Thread {
}
public void run() {
- while (!mQuit) {
+ while (true) {
try {
PhantomWrapper ref = (PhantomWrapper) mQueue.remove();
//System.out.println("dequeued ref " + ref.mNativeData +
@@ -142,12 +141,12 @@ class BitmapWatcher extends Thread {
//ref.clear();
} catch (InterruptedException ie) {
System.out.println("intr");
+ break;
}
}
}
public void shutDown() {
- mQuit = true;
interrupt();
}
}
diff --git a/test/087-gc-after-link/src/Main.java b/test/087-gc-after-link/src/Main.java
index 2f6d496f44..7c47e9976f 100644
--- a/test/087-gc-after-link/src/Main.java
+++ b/test/087-gc-after-link/src/Main.java
@@ -91,6 +91,7 @@ public class Main {
* is an error we can't recover from.
*/
meth.invoke(dexFile, name, this);
+ System.out.println("Unreachable");
} finally {
if (dexFile != null) {
/* close the DexFile to make CloseGuard happy */
diff --git a/test/088-monitor-verification/smali/NullLocks.smali b/test/088-monitor-verification/smali/NullLocks.smali
new file mode 100644
index 0000000000..8262f19e22
--- /dev/null
+++ b/test/088-monitor-verification/smali/NullLocks.smali
@@ -0,0 +1,28 @@
+.class public LNullLocks;
+
+.super Ljava/lang/Object;
+
+.method public static run(Z)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertIsManaged()V
+
+ if-eqz v2, :Lfalse
+
+ const v0, 0 # Null.
+ monitor-enter v0
+ const v1, 0 # Another null. This should be detected as an alias, such that the exit
+ # will not fail verification.
+ monitor-exit v1
+
+ monitor-enter v0
+ monitor-exit v1
+
+ monitor-enter v1
+ monitor-exit v0
+
+:Lfalse
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index d742b1410a..212c894bd5 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -221,6 +221,8 @@ public class Main {
IllegalMonitorStateException.class);
runTest("UnbalancedJoin", new Object[] { new Object(), new Object() }, null);
runTest("UnbalancedStraight", new Object[] { new Object(), new Object() }, null);
+ runTest("NullLocks", new Object[] { false }, null);
+ runTest("NullLocks", new Object[] { true }, NullPointerException.class);
}
private static void runTest(String className, Object[] parameters, Class<?> excType) {
diff --git a/test/088-monitor-verification/src/TwoPath.java b/test/088-monitor-verification/src/TwoPath.java
index 2542de7f27..bdc15ad82e 100644
--- a/test/088-monitor-verification/src/TwoPath.java
+++ b/test/088-monitor-verification/src/TwoPath.java
@@ -31,6 +31,8 @@ public class TwoPath {
* Conditionally uses one of the synchronized objects.
*/
public static void twoPath(Object obj1, Object obj2, int x) {
+ Main.assertIsManaged();
+
Object localObj;
synchronized (obj1) {
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 948273abd5..e9946c885c 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -390,6 +390,20 @@ extern "C" bool nb_is_compatible(uint32_t bridge_version ATTRIBUTE_UNUSED) {
#endif
#endif
+static bool cannot_be_blocked(int signum) {
+  // These two signals cannot be blocked anywhere.
+ if ((signum == SIGKILL) || (signum == SIGSTOP)) {
+ return true;
+ }
+
+  // Signals in the invalid realtime range cannot be blocked.
+ if (((signum >= 32) && (signum < SIGRTMIN)) || (signum > SIGRTMAX)) {
+ return true;
+ }
+
+ return false;
+}
+
// A dummy special handler, continuing after the faulting location. This code comes from
// 004-SignalTest.
static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* context) {
@@ -413,6 +427,23 @@ static bool nb_signalhandler(int sig, siginfo_t* info ATTRIBUTE_UNUSED, void* co
UNUSED(context);
#endif
}
+
+  // Before this handler is invoked, all other unclaimed signals must be blocked.
+  // Check the signal mask here to verify that.
+ sigset_t tmpset;
+ sigemptyset(&tmpset);
+ sigprocmask(SIG_SETMASK, nullptr, &tmpset);
+ int other_claimed = (sig == SIGSEGV) ? SIGILL : SIGSEGV;
+ for (int signum = 0; signum < NSIG; ++signum) {
+ if (cannot_be_blocked(signum)) {
+ continue;
+ } else if ((sigismember(&tmpset, signum)) && (signum == other_claimed)) {
+ printf("ERROR: The claimed signal %d is blocked\n", signum);
+ } else if ((!sigismember(&tmpset, signum)) && (signum != other_claimed)) {
+ printf("ERROR: The unclaimed signal %d is not blocked\n", signum);
+ }
+ }
+
// We handled this...
return true;
}
diff --git a/test/131-structural-change/expected.txt b/test/131-structural-change/expected.txt
index cc7713d252..1d19278f1e 100644
--- a/test/131-structural-change/expected.txt
+++ b/test/131-structural-change/expected.txt
@@ -1,2 +1,3 @@
+JNI_OnLoad called
Should really reach here.
Done.
diff --git a/test/131-structural-change/src/Main.java b/test/131-structural-change/src/Main.java
index 6cbbd12387..c7488992df 100644
--- a/test/131-structural-change/src/Main.java
+++ b/test/131-structural-change/src/Main.java
@@ -35,7 +35,7 @@ public class Main {
e.printStackTrace(System.out);
}
- boolean haveOatFile = hasOat();
+ boolean haveOatFile = hasOatFile();
boolean gotError = false;
try {
Class<?> bClass = getClass().getClassLoader().loadClass("B");
@@ -45,10 +45,10 @@ public class Main {
e.printStackTrace(System.out);
}
if (haveOatFile ^ gotError) {
- System.out.println("Did not get expected error.");
+ System.out.println("Did not get expected error. " + haveOatFile + " " + gotError);
}
System.out.println("Done.");
}
- private native static boolean hasOat();
+ private native static boolean hasOatFile();
}
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index 134abd17f2..f1885def13 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -537,6 +537,17 @@ public class Main {
return ((SubclassA)a).toString();
}
+
+ /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) reference_type_propagation (after)
+ /// CHECK: ParameterValue klass:Main can_be_null:false exact:false
+ /// CHECK: ParameterValue klass:Super can_be_null:true exact:false
+ /// CHECK: ParameterValue
+ /// CHECK: ParameterValue klass:SubclassA can_be_null:true exact:false
+ /// CHECK: ParameterValue klass:Final can_be_null:true exact:true
+ /// CHECK-NOT: ParameterValue
+ private void argumentCheck(Super s, double d, SubclassA a, Final f) {
+ }
+
public static void main(String[] args) {
}
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 0ee2ff9fda..30f99542a1 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -46,12 +46,12 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVReg(m, 1, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
success = GetVReg(m, 2, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
@@ -83,12 +83,12 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 6fcebdb8b5..64b2336bee 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -64,7 +64,7 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 1u);
bool success = GetVReg(m, 2, kIntVReg, &value);
- if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ if (!IsCurrentFrameInInterpreter() && GetCurrentOatQuickMethodHeader()->IsOptimized()) {
CHECK(!success);
}
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 2a56a7fce7..375a3fc824 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -15,9 +15,9 @@
*/
#include "arch/context.h"
-#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
@@ -44,7 +44,7 @@ class TestVisitor : public StackVisitor {
found_method_ = true;
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
- GetCurrentCode().IsOptimized(sizeof(void*)) &&
+ GetCurrentOatQuickMethodHeader()->IsOptimized() &&
!Runtime::Current()->IsDebuggable()) {
CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
} else {
diff --git a/test/539-checker-arm64-encodable-immediates/expected.txt b/test/539-checker-arm64-encodable-immediates/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/expected.txt
diff --git a/test/539-checker-arm64-encodable-immediates/info.txt b/test/539-checker-arm64-encodable-immediates/info.txt
new file mode 100644
index 0000000000..efeef33231
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/info.txt
@@ -0,0 +1,2 @@
+Basic tests that check the compiler recognizes when constant values can be
+encoded in the immediate field of instructions.
diff --git a/test/539-checker-arm64-encodable-immediates/src/Main.java b/test/539-checker-arm64-encodable-immediates/src/Main.java
new file mode 100644
index 0000000000..7e3ff9fde8
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /**
+ * Test that the `-1` constant is not synthesized in a register and that we
+ * instead simply switch between `add` and `sub` instructions with the
+ * constant embedded.
+ * We need two uses (or more) of the constant because the compiler always
+ * delegates the immediate value handling to VIXL when there is only one use.
+ */
+
+ /// CHECK-START-ARM64: long Main.addM1(long) register (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK: <<ConstM1:j\d+>> LongConstant -1
+ /// CHECK-NOT: ParallelMove
+ /// CHECK: Add [<<Arg>>,<<ConstM1>>]
+ /// CHECK: Sub [<<Arg>>,<<ConstM1>>]
+
+ /// CHECK-START-ARM64: long Main.addM1(long) disassembly (after)
+ /// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK: add x{{\d+}}, x{{\d+}}, #0x1
+
+ public static long addM1(long arg) {
+ return (arg + (-1)) | (arg - (-1));
+ }
+
+ public static void main(String[] args) {
+ assertLongEquals(14, addM1(7));
+ }
+}
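As an aside on the test above: the expected value in `main` follows directly from the identity being exercised. With the embedded `-1`, the addition lowers to `sub x, x, #0x1` and the subtraction to `add x, x, #0x1`, so for an argument of 7 the method computes 6 | 8 = 14. A minimal standalone sketch of that arithmetic (the class name `AddM1Check` is illustrative, not part of the change):

public class AddM1Check {
  public static void main(String[] args) {
    long arg = 7;
    long added = arg + (-1);   // lowered to: sub x, x, #0x1 -> 6 (0b0110)
    long subbed = arg - (-1);  // lowered to: add x, x, #0x1 -> 8 (0b1000)
    // OR-ing the two results keeps both operations live, giving the constant two uses.
    System.out.println(added | subbed);  // prints 14, matching assertLongEquals(14, ...)
  }
}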
diff --git a/test/540-checker-rtp-bug/expected.txt b/test/540-checker-rtp-bug/expected.txt
new file mode 100644
index 0000000000..2cf2842aa5
--- /dev/null
+++ b/test/540-checker-rtp-bug/expected.txt
@@ -0,0 +1 @@
+instanceof failed
diff --git a/test/540-checker-rtp-bug/info.txt b/test/540-checker-rtp-bug/info.txt
new file mode 100644
index 0000000000..852cd7c1b4
--- /dev/null
+++ b/test/540-checker-rtp-bug/info.txt
@@ -0,0 +1 @@
+Test that we set the proper types for objects (b/25008765).
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
new file mode 100644
index 0000000000..e9f16c04d9
--- /dev/null
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class Final {
+ public String toString() {
+ return "final";
+ }
+}
+
+public class Main {
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>] klass:Final
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) instruction_simplifier_after_types (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>]
+ /// CHECK: Return [<<Ret>>]
+ public static Final testKeepCheckCast(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return (Final) x;
+ }
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) dead_code_elimination (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+ public static void testKeepInstanceOf(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ if (x instanceof Final) {
+ System.out.println("instanceof succeed");
+ } else {
+ System.out.println("instanceof failed");
+ }
+ }
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) inliner (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+ public static String testNoInline(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return x.toString();
+ }
+
+ public static void main(String[] args) {
+ try {
+ testKeepCheckCast(new Object(), true);
+ throw new Error("Expected check cast exception");
+ } catch (ClassCastException e) {
+ // expected
+ }
+
+ testKeepInstanceOf(new Object(), true);
+
+ if ("final".equals(testNoInline(new Object(), true))) {
+ throw new Error("Bad inlining");
+ }
+ }
+}
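For context on what these checker assertions protect: all three methods share the same shape, a loop phi merging the locally created `Final` instance with the untyped parameter `o`. Reference type propagation must type that phi as java.lang.Object (the merge of Final and Object); typing it as Final would let later passes wrongly drop the CheckCast and InstanceOf, or inline Final.toString() for a receiver that is not a Final. A condensed, self-contained sketch of the runtime contract the test enforces (`RtpContractCheck` and its copy of `Final` are illustrative, not part of the change):

final class Final {
  public String toString() { return "final"; }
}

public class RtpContractCheck {
  // phi(Final, Object) must be typed java.lang.Object, so the cast must stay.
  static Final mergeThenCast(Object o, boolean cond) {
    Object x = new Final();
    while (cond) {
      x = o;          // x may now hold any Object
      cond = false;
    }
    return (Final) x; // must throw ClassCastException when o is a plain Object
  }

  public static void main(String[] args) {
    try {
      mergeThenCast(new Object(), true);
      System.out.println("bug: CheckCast was dropped");
    } catch (ClassCastException expected) {
      System.out.println("CheckCast kept"); // the behavior guarded by b/25008765
    }
  }
}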
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ad64b68ee2..9c04135c71 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -41,8 +41,7 @@ TEST_ART_RUN_TEST_DEPENDENCIES := \
ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
TEST_ART_RUN_TEST_DEPENDENCIES += \
- $(JACK_JAR) \
- $(JACK_LAUNCHER_JAR) \
+ $(JACK) \
$(JILL_JAR)
endif
@@ -61,15 +60,13 @@ define define-build-art-run-test
run_test_options += --build-with-javac-dx
endif
$$(dmart_target): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
-$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES)
+$$(dmart_target): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
JACK=$(abspath $(JACK)) \
- JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
JACK_CLASSPATH=$(TARGET_JACK_CLASSPATH) \
- JACK_JAR=$(abspath $(JACK_JAR)) \
JILL_JAR=$(abspath $(JILL_JAR)) \
$(LOCAL_PATH)/run-test $$(PRIVATE_RUN_TEST_OPTIONS) --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
@@ -214,19 +211,24 @@ TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
055-enum-performance \
133-static-invoke-super
+# disable timing sensitive tests on "dist" builds.
+ifdef dist_goal
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
# Tests that require python3.
TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
961-default-iface-resolution-generated \
964-default-iface-init-generated \
-# disable timing sensitive tests on "dist" builds.
-ifdef dist_goal
- ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
- $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
- $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+# Check if we have python3 to run our tests.
+ifeq ($(wildcard /usr/bin/python3),)
+ $(warning "No python3 found. Disabling tests: $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS)")
- # Currently disable tsts requiring python3.
+ # Currently disable tests requiring python3 when it is not installed.
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
@@ -297,10 +299,14 @@ endif
TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS :=
# Tests that are broken with GC stress.
-# 137-cfi needs to unwind a second forked process. We're using a primitive sleep to wait till we
-# hope the second process got into the expected state. The slowness of gcstress makes this bad.
+# * 137-cfi needs to unwind a second forked process. We use a primitive sleep and hope the second
+#   process has reached the expected state by then; the slowness of gcstress makes this unreliable.
+# * 961-default-iface-resolution-generated is a very long test that often takes more than the
+#   timeout to run when gcstress is enabled. This is because gcstress significantly slows down
+#   allocations, and this test performs a very large number of them.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
- 137-cfi
+ 137-cfi \
+ 961-default-iface-resolution-generated
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
@@ -324,13 +330,15 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUIL
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
# 131 is an old test. The functionality has been implemented at an earlier stage and is checked
-# in tests 138.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# in test 138. Blacklisted for debug builds since these builds have duplicate classes checks which
+# punt to the interpreter.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),debug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
-# 138-duplicate-classes-check. Turned off temporarily, b/21333911.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# 138-duplicate-classes-check. Turned on for debug builds since debug builds have duplicate classes
+# checks enabled, b/21333911.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))
@@ -684,13 +692,13 @@ define define-test-art-run-test
uc_host_or_target := HOST
test_groups := ART_RUN_TEST_HOST_RULES
run_test_options += --host
- prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
+ prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(HOST_JACK_CLASSPATH_DEPENDENCIES)
jack_classpath := $(HOST_JACK_CLASSPATH)
else
ifeq ($(1),target)
uc_host_or_target := TARGET
test_groups := ART_RUN_TEST_TARGET_RULES
- prereq_rule := test-art-target-sync
+ prereq_rule := test-art-target-sync $(TARGET_JACK_CLASSPATH_DEPENDENCIES)
jack_classpath := $(TARGET_JACK_CLASSPATH)
else
$$(error found $(1) expected $(TARGET_TYPES))
@@ -906,9 +914,7 @@ $$(run_test_rule_name): $(TEST_ART_RUN_TEST_DEPENDENCIES) $(HOST_OUT_EXECUTABLES
SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
JACK=$(abspath $(JACK)) \
- JACK_VM_COMMAND="$(JACK_VM) $(DEFAULT_JACK_VM_ARGS) $(JAVA_TMPDIR_ARG) -jar $(abspath $(JACK_LAUNCHER_JAR)) " \
JACK_CLASSPATH=$$(PRIVATE_JACK_CLASSPATH) \
- JACK_JAR=$(abspath $(JACK_JAR)) \
JILL_JAR=$(abspath $(JILL_JAR)) \
art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(12) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
diff --git a/test/run-test b/test/run-test
index 1b71f33209..293779f7c3 100755
--- a/test/run-test
+++ b/test/run-test
@@ -82,24 +82,11 @@ if [ -z "$ANDROID_BUILD_TOP" ]; then
export ANDROID_BUILD_TOP=$oldwd
fi
-# If JACK_VM_COMMAND is not set, assume it launches the prebuilt jack-launcher.
-if [ -z "$JACK_VM_COMMAND" ]; then
- if [ ! -z "$TMPDIR" ]; then
- jack_temp_dir="-Djava.io.tmpdir=$TMPDIR"
- fi
- export JACK_VM_COMMAND="java -Dfile.encoding=UTF-8 -Xms2560m -XX:+TieredCompilation $jack_temp_dir -jar $ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack-launcher.jar"
-fi
-
# If JACK_CLASSPATH is not set, assume it only contains core-libart.
if [ -z "$JACK_CLASSPATH" ]; then
export JACK_CLASSPATH="$ANDROID_BUILD_TOP/out/host/common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack"
fi
-# If JACK_JAR is not set, assume it is located in the prebuilts directory.
-if [ -z "$JACK_JAR" ]; then
- export JACK_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jack.jar"
-fi
-
# If JILL_JAR is not set, assume it is located in the prebuilts directory.
if [ -z "$JILL_JAR" ]; then
export JILL_JAR="$ANDROID_BUILD_TOP/prebuilts/sdk/tools/jill.jar"
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 71366c1313..6869b04a0b 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -74,7 +74,7 @@ AHAT_TEST_DUMP_HPROF := $(intermediates.COMMON)/test-dump.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
$(ART_HOST_EXECUTABLES) \
$(HOST_OUT_EXECUTABLES)/art \
- $(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
+ $(HOST_CORE_IMG_OUT_BASE)-optimizing-pic$(CORE_IMG_SUFFIX)
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
index 84bded9281..178e0b5bc5 100644
--- a/tools/checker/common/archs.py
+++ b/tools/checker/common/archs.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-archs_list = ['ARM', 'ARM64', 'MIPS64', 'X86', 'X86_64']
+archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 7ada1896e8..b4f686fde9 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -157,5 +157,12 @@
modes: [device],
names: ["libcore.java.util.TimeZoneTest#testAllDisplayNames"],
bug: 22786792
+},
+{
+ description: "Formatting failures",
+ result: EXEC_FAILED,
+ names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
+ "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
+ bug: 25136848
}
]