summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.mk4
-rw-r--r--build/Android.common_build.mk40
-rw-r--r--build/Android.executable.mk7
-rw-r--r--build/Android.gtest.mk12
-rw-r--r--build/Android.oat.mk13
-rw-r--r--compiler/Android.mk69
-rw-r--r--compiler/common_compiler_test.cc75
-rw-r--r--compiler/compiled_method.cc77
-rw-r--r--compiler/compiled_method.h33
-rw-r--r--compiler/compiler.cc52
-rw-r--r--compiler/compiler.h31
-rw-r--r--compiler/dex/bb_optimizations.h35
-rw-r--r--compiler/dex/compiler_enums.h28
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc16
-rw-r--r--compiler/dex/frontend.cc3
-rw-r--r--compiler/dex/frontend.h3
-rw-r--r--compiler/dex/mir_dataflow.cc60
-rw-r--r--compiler/dex/mir_graph.cc51
-rw-r--r--compiler/dex/mir_graph.h88
-rw-r--r--compiler/dex/mir_optimization.cc335
-rw-r--r--compiler/dex/mir_optimization_test.cc294
-rw-r--r--compiler/dex/pass_driver_me_opts.cc1
-rw-r--r--compiler/dex/portable/mir_to_gbc.cc2003
-rw-r--r--compiler/dex/portable/mir_to_gbc.h241
-rw-r--r--compiler/dex/quick/arm/arm_lir.h11
-rw-r--r--compiler/dex/quick/arm/assemble_arm.cc4
-rw-r--r--compiler/dex/quick/arm/codegen_arm.h4
-rw-r--r--compiler/dex/quick/arm/int_arm.cc11
-rw-r--r--compiler/dex/quick/arm/target_arm.cc26
-rw-r--r--compiler/dex/quick/arm64/arm64_lir.h1
-rw-r--r--compiler/dex/quick/arm64/assemble_arm64.cc24
-rw-r--r--compiler/dex/quick/arm64/codegen_arm64.h6
-rw-r--r--compiler/dex/quick/arm64/int_arm64.cc28
-rw-r--r--compiler/dex/quick/arm64/target_arm64.cc31
-rw-r--r--compiler/dex/quick/gen_common.cc21
-rw-r--r--compiler/dex/quick/mir_to_lir.cc62
-rw-r--r--compiler/dex/quick/mir_to_lir.h4
-rw-r--r--compiler/dex/quick/quick_compiler.cc20
-rw-r--r--compiler/dex/ssa_transformation.cc6
-rw-r--r--compiler/dex/verification_results.cc12
-rw-r--r--compiler/dex/verified_method.cc62
-rw-r--r--compiler/dex/verified_method.h7
-rw-r--r--compiler/driver/compiler_driver.cc43
-rw-r--r--compiler/driver/compiler_driver.h12
-rw-r--r--compiler/driver/compiler_driver_test.cc5
-rw-r--r--compiler/driver/compiler_options.h19
-rw-r--r--compiler/elf_writer_mclinker.cc411
-rw-r--r--compiler/elf_writer_mclinker.h98
-rw-r--r--compiler/elf_writer_quick.cc2
-rw-r--r--compiler/elf_writer_test.cc22
-rw-r--r--compiler/image_test.cc5
-rw-r--r--compiler/image_writer.cc40
-rw-r--r--compiler/image_writer.h11
-rw-r--r--compiler/jni/jni_compiler_test.cc41
-rw-r--r--compiler/jni/portable/jni_compiler.cc322
-rw-r--r--compiler/jni/portable/jni_compiler.h87
-rw-r--r--compiler/llvm/art_module.ll153
-rw-r--r--compiler/llvm/backend_options.h50
-rw-r--r--compiler/llvm/backend_types.h104
-rw-r--r--compiler/llvm/compiler_llvm.cc233
-rw-r--r--compiler/llvm/compiler_llvm.h115
-rw-r--r--compiler/llvm/gbc_expander.cc3796
-rw-r--r--compiler/llvm/generated/art_module.cc1096
-rw-r--r--compiler/llvm/intrinsic_func_list.def1796
-rw-r--r--compiler/llvm/intrinsic_helper.cc178
-rw-r--r--compiler/llvm/intrinsic_helper.h157
-rw-r--r--compiler/llvm/ir_builder.cc130
-rw-r--r--compiler/llvm/ir_builder.h486
-rw-r--r--compiler/llvm/llvm_compilation_unit.cc323
-rw-r--r--compiler/llvm/llvm_compilation_unit.h138
-rw-r--r--compiler/llvm/llvm_compiler.cc163
-rw-r--r--compiler/llvm/llvm_compiler.h29
-rw-r--r--compiler/llvm/md_builder.cc117
-rw-r--r--compiler/llvm/md_builder.h71
-rw-r--r--compiler/llvm/runtime_support_builder.cc202
-rw-r--r--compiler/llvm/runtime_support_builder.h98
-rw-r--r--compiler/llvm/runtime_support_builder_arm.cc120
-rw-r--r--compiler/llvm/runtime_support_builder_arm.h42
-rw-r--r--compiler/llvm/runtime_support_builder_x86.cc84
-rw-r--r--compiler/llvm/runtime_support_builder_x86.h42
-rw-r--r--compiler/llvm/runtime_support_llvm_func.h37
-rw-r--r--compiler/llvm/runtime_support_llvm_func_list.h81
-rwxr-xr-xcompiler/llvm/tools/gen_art_module_cc.sh50
-rw-r--r--compiler/llvm/utils_llvm.h32
-rw-r--r--compiler/oat_test.cc47
-rw-r--r--compiler/oat_writer.cc198
-rw-r--r--compiler/oat_writer.h15
-rw-r--r--compiler/optimizing/bounds_check_elimination.cc188
-rw-r--r--compiler/optimizing/builder.cc168
-rw-r--r--compiler/optimizing/builder.h47
-rw-r--r--compiler/optimizing/code_generator.cc13
-rw-r--r--compiler/optimizing/code_generator_arm.cc4
-rw-r--r--compiler/optimizing/code_generator_arm64.cc346
-rw-r--r--compiler/optimizing/code_generator_arm64.h54
-rw-r--r--compiler/optimizing/code_generator_x86.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc8
-rw-r--r--compiler/optimizing/codegen_test.cc25
-rw-r--r--compiler/optimizing/constant_folding_test.cc3
-rw-r--r--compiler/optimizing/dead_code_elimination_test.cc3
-rw-r--r--compiler/optimizing/graph_checker_test.cc2
-rw-r--r--compiler/optimizing/graph_visualizer.cc28
-rw-r--r--compiler/optimizing/graph_visualizer.h11
-rw-r--r--compiler/optimizing/gvn_test.cc14
-rw-r--r--compiler/optimizing/inliner.cc208
-rw-r--r--compiler/optimizing/inliner.h56
-rw-r--r--compiler/optimizing/linearize_test.cc4
-rw-r--r--compiler/optimizing/live_ranges_test.cc4
-rw-r--r--compiler/optimizing/liveness_test.cc4
-rw-r--r--compiler/optimizing/nodes.cc79
-rw-r--r--compiler/optimizing/nodes.h76
-rw-r--r--compiler/optimizing/optimizing_compiler.cc145
-rw-r--r--compiler/optimizing/optimizing_compiler.h2
-rw-r--r--compiler/optimizing/optimizing_compiler_stats.h113
-rw-r--r--compiler/optimizing/register_allocator.cc10
-rw-r--r--compiler/optimizing/register_allocator.h9
-rw-r--r--compiler/optimizing/register_allocator_test.cc8
-rw-r--r--compiler/optimizing/ssa_test.cc2
-rw-r--r--compiler/optimizing/stack_map_stream.h2
-rw-r--r--compiler/sea_ir/code_gen/code_gen.cc291
-rw-r--r--compiler/sea_ir/code_gen/code_gen.h171
-rw-r--r--compiler/sea_ir/code_gen/code_gen_data.cc104
-rw-r--r--compiler/sea_ir/debug/dot_gen.cc173
-rw-r--r--compiler/sea_ir/debug/dot_gen.h121
-rw-r--r--compiler/sea_ir/frontend.cc93
-rw-r--r--compiler/sea_ir/ir/instruction_nodes.h248
-rw-r--r--compiler/sea_ir/ir/instruction_tools.cc797
-rw-r--r--compiler/sea_ir/ir/instruction_tools.h125
-rw-r--r--compiler/sea_ir/ir/regions_test.cc58
-rw-r--r--compiler/sea_ir/ir/sea.cc681
-rw-r--r--compiler/sea_ir/ir/sea.h353
-rw-r--r--compiler/sea_ir/ir/sea_node.h77
-rw-r--r--compiler/sea_ir/ir/visitor.h87
-rw-r--r--compiler/sea_ir/types/type_data_test.cc40
-rw-r--r--compiler/sea_ir/types/type_inference.cc187
-rw-r--r--compiler/sea_ir/types/type_inference.h91
-rw-r--r--compiler/sea_ir/types/type_inference_visitor.cc109
-rw-r--r--compiler/sea_ir/types/type_inference_visitor.h81
-rw-r--r--compiler/sea_ir/types/type_inference_visitor_test.cc132
-rw-r--r--compiler/sea_ir/types/types.h58
-rw-r--r--compiler/trampolines/trampoline_compiler.cc9
-rw-r--r--compiler/utils/scoped_hashtable.h71
-rw-r--r--compiler/utils/scoped_hashtable_test.cc68
-rw-r--r--dex2oat/dex2oat.cc89
-rw-r--r--disassembler/disassembler_arm.cc2
-rw-r--r--disassembler/disassembler_x86.cc511
-rw-r--r--disassembler/disassembler_x86.h7
-rw-r--r--oatdump/oatdump.cc44
-rw-r--r--patchoat/patchoat.cc6
-rw-r--r--runtime/Android.mk27
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc7
-rw-r--r--runtime/arch/arm/portable_entrypoints_arm.S162
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S23
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc7
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S40
-rw-r--r--runtime/arch/mips/asm_support_mips.S49
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc7
-rw-r--r--runtime/arch/mips/portable_entrypoints_mips.S132
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S118
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc7
-rw-r--r--runtime/arch/x86/portable_entrypoints_x86.S131
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S20
-rw-r--r--runtime/arch/x86_64/entrypoints_init_x86_64.cc9
-rw-r--r--runtime/arch/x86_64/portable_entrypoints_x86_64.S30
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S98
-rw-r--r--runtime/asm_support.h8
-rw-r--r--runtime/base/logging.cc2
-rw-r--r--runtime/base/logging.h5
-rw-r--r--runtime/base/macros.h2
-rw-r--r--runtime/base/mutex-inl.h8
-rw-r--r--runtime/base/mutex.cc24
-rw-r--r--runtime/base/mutex.h12
-rw-r--r--runtime/class_linker.cc184
-rw-r--r--runtime/class_linker.h17
-rw-r--r--runtime/common_runtime_test.h10
-rw-r--r--runtime/common_throws.cc4
-rw-r--r--runtime/debugger.cc179
-rw-r--r--runtime/dex_file_test.cc1
-rw-r--r--runtime/dex_instruction_list.h8
-rw-r--r--runtime/elf_file.cc14
-rw-r--r--runtime/entrypoints/interpreter/interpreter_entrypoints.cc11
-rw-r--r--runtime/entrypoints/portable/portable_alloc_entrypoints.cc76
-rw-r--r--runtime/entrypoints/portable/portable_cast_entrypoints.cc57
-rw-r--r--runtime/entrypoints/portable/portable_dexcache_entrypoints.cc53
-rw-r--r--runtime/entrypoints/portable/portable_entrypoints.h44
-rw-r--r--runtime/entrypoints/portable/portable_field_entrypoints.cc245
-rw-r--r--runtime/entrypoints/portable/portable_fillarray_entrypoints.cc35
-rw-r--r--runtime/entrypoints/portable/portable_invoke_entrypoints.cc118
-rw-r--r--runtime/entrypoints/portable/portable_jni_entrypoints.cc99
-rw-r--r--runtime/entrypoints/portable/portable_lock_entrypoints.cc40
-rw-r--r--runtime/entrypoints/portable/portable_thread_entrypoints.cc94
-rw-r--r--runtime/entrypoints/portable/portable_throw_entrypoints.cc128
-rw-r--r--runtime/entrypoints/portable/portable_trampoline_entrypoints.cc496
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc23
-rw-r--r--runtime/entrypoints/runtime_asm_entrypoints.h36
-rw-r--r--runtime/entrypoints_order_test.cc15
-rw-r--r--runtime/exception_test.cc81
-rw-r--r--runtime/gc/accounting/space_bitmap.cc1
-rw-r--r--runtime/gc/heap.cc43
-rw-r--r--runtime/gc/heap.h8
-rw-r--r--runtime/gc/reference_processor.cc2
-rw-r--r--runtime/gc/reference_queue.cc19
-rw-r--r--runtime/gc/reference_queue.h17
-rw-r--r--runtime/gc/reference_queue_test.cc85
-rw-r--r--runtime/gc/space/malloc_space.cc3
-rw-r--r--runtime/globals.h12
-rw-r--r--runtime/instrumentation.cc58
-rw-r--r--runtime/instrumentation.h7
-rw-r--r--runtime/interpreter/interpreter_common.cc22
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc40
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc26
-rw-r--r--runtime/java_vm_ext.cc1
-rw-r--r--runtime/jdwp/jdwp_handler.cc6
-rw-r--r--runtime/jni_internal_test.cc2
-rw-r--r--runtime/mirror/art_method-inl.h19
-rw-r--r--runtime/mirror/art_method.cc60
-rw-r--r--runtime/mirror/art_method.h66
-rw-r--r--runtime/mirror/class-inl.h10
-rw-r--r--runtime/mirror/object.cc1
-rw-r--r--runtime/mirror/object_test.cc4
-rw-r--r--runtime/modifiers.h1
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc9
-rw-r--r--runtime/native_bridge_art_interface.cc1
-rw-r--r--runtime/oat.cc98
-rw-r--r--runtime/oat.h18
-rw-r--r--runtime/oat_file.cc49
-rw-r--r--runtime/oat_file.h23
-rw-r--r--runtime/parsed_options.cc16
-rw-r--r--runtime/profiler.cc2
-rw-r--r--runtime/quick/inline_method_analyser.cc1
-rw-r--r--runtime/quick_exception_handler.h2
-rw-r--r--runtime/reference_table.cc105
-rw-r--r--runtime/reflection.cc6
-rw-r--r--runtime/reflection_test.cc25
-rw-r--r--runtime/runtime.cc89
-rw-r--r--runtime/runtime.h11
-rw-r--r--runtime/runtime_android.cc1
-rw-r--r--runtime/runtime_linux.cc1
-rw-r--r--runtime/stack.cc9
-rw-r--r--runtime/stack.h40
-rw-r--r--runtime/stack_map.h8
-rw-r--r--runtime/thread-inl.h8
-rw-r--r--runtime/thread.cc16
-rw-r--r--runtime/thread.h9
-rw-r--r--runtime/thread_list.cc26
-rw-r--r--runtime/trace.cc2
-rw-r--r--runtime/verifier/method_verifier.cc60
-rw-r--r--runtime/verifier/method_verifier.h9
-rw-r--r--runtime/well_known_classes.cc4
-rw-r--r--runtime/well_known_classes.h2
-rw-r--r--test/004-ReferenceMap/stack_walk_refmap_jni.cc10
-rw-r--r--test/015-switch/expected.txt116
-rw-r--r--test/015-switch/src/Main.java331
-rw-r--r--test/109-suspend-check/src/Main.java36
-rw-r--r--test/118-noimage-dex2oat/expected.txt3
-rw-r--r--test/118-noimage-dex2oat/smali/b_18485243.smali22
-rw-r--r--test/118-noimage-dex2oat/src/Main.java18
-rw-r--r--test/129-ThreadGetId/expected.txt1
-rw-r--r--test/129-ThreadGetId/info.txt1
-rw-r--r--test/129-ThreadGetId/src/Main.java53
-rw-r--r--test/436-shift-constant/expected.txt0
-rw-r--r--test/436-shift-constant/info.txt1
-rw-r--r--test/436-shift-constant/src/Main.java (renamed from runtime/arch/arm64/portable_entrypoints_arm64.S)32
-rw-r--r--test/437-inline/expected.txt0
-rw-r--r--test/437-inline/info.txt1
-rw-r--r--test/437-inline/src/Main.java86
-rw-r--r--test/704-multiply-accumulate/expected.txt1
-rw-r--r--test/704-multiply-accumulate/info.txt1
-rw-r--r--test/704-multiply-accumulate/src/Main.java171
-rw-r--r--test/Android.run-test.mk29
-rw-r--r--tools/art8
-rw-r--r--tools/libcore_failures.txt11
271 files changed, 4680 insertions, 22149 deletions
diff --git a/Android.mk b/Android.mk
index d11d011865..40f5f9a853 100644
--- a/Android.mk
+++ b/Android.mk
@@ -313,11 +313,7 @@ OAT_TARGET_RULES :=
# $(1): input jar or apk target location
define declare-oat-target-target
-ifneq (,$(filter $(1),$(addprefix system/app/,$(addsuffix .apk,$(PRODUCT_DEX_PREOPT_PACKAGES_IN_DATA)))))
-OUT_OAT_FILE := $(call dalvik-cache-out,$(1)/classes.dex)
-else
OUT_OAT_FILE := $(PRODUCT_OUT)/$(basename $(1)).odex
-endif
ifeq ($(ONE_SHOT_MAKEFILE),)
# ONE_SHOT_MAKEFILE is empty for a top level build and we don't want
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 5dd9f1534c..bba48b30fc 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -59,42 +59,11 @@ ART_SMALL_MODE := true
endif
#
-# Used to enable SEA mode
-#
-ART_SEA_IR_MODE := false
-ifneq ($(wildcard art/SEA_IR_ART),)
-$(info Enabling ART_SEA_IR_MODE because of existence of art/SEA_IR_ART)
-ART_SEA_IR_MODE := true
-endif
-ifeq ($(WITH_ART_SEA_IR_MODE), true)
-ART_SEA_IR_MODE := true
-endif
-
-#
-# Used to enable portable mode
-#
-ART_USE_PORTABLE_COMPILER := false
-ifneq ($(wildcard art/USE_PORTABLE_COMPILER),)
-$(info Enabling ART_USE_PORTABLE_COMPILER because of existence of art/USE_PORTABLE_COMPILER)
-ART_USE_PORTABLE_COMPILER := true
-endif
-ifeq ($(WITH_ART_USE_PORTABLE_COMPILER),true)
-$(info Enabling ART_USE_PORTABLE_COMPILER because WITH_ART_USE_PORTABLE_COMPILER=true)
-ART_USE_PORTABLE_COMPILER := true
-endif
-
-#
# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
#
art_default_gc_type ?= CMS
art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(art_default_gc_type)
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- LLVM_ROOT_PATH := external/llvm
- # Don't fail a dalvik minimal host build.
- -include $(LLVM_ROOT_PATH)/llvm.mk
-endif
-
ART_HOST_CFLAGS :=
ART_TARGET_CFLAGS :=
@@ -194,7 +163,6 @@ ART_C_INCLUDES := \
external/valgrind/main \
external/vixl/src \
external/zlib \
- frameworks/compile/mclinker/include
# Base set of cflags used by all things ART.
art_cflags := \
@@ -229,14 +197,14 @@ ifeq ($(ART_SMALL_MODE),true)
art_cflags += -DART_SMALL_MODE=1
endif
-ifeq ($(ART_SEA_IR_MODE),true)
- art_cflags += -DART_SEA_IR_MODE=1
-endif
-
ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
art_cflags += -DART_USE_OPTIMIZING_COMPILER=1
endif
+ifeq ($(ART_HEAP_POISONING),true)
+ art_cflags += -DART_HEAP_POISONING=1
+endif
+
# Cflags for non-debug ART and ART tools.
art_non_debug_cflags := \
-O3
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index ae42136e9e..be97e8205e 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -20,9 +20,6 @@ ART_HOST_EXECUTABLES ?=
ART_TARGET_EXECUTABLES ?=
ART_EXECUTABLES_CFLAGS :=
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- ART_EXECUTABLES_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
-endif
# $(1): executable ("d" will be appended for debug version)
# $(2): source
@@ -66,9 +63,9 @@ define build-art-executable
endif
LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
- # Mac OS linker doesn't understand --export-dynamic/--version-script.
+ # Mac OS linker doesn't understand --export-dynamic.
ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host)
- LOCAL_LDFLAGS := -Wl,--version-script,art/sigchainlib/version-script.txt -Wl,--export-dynamic
+ LOCAL_LDFLAGS := -Wl,--export-dynamic
endif
ifeq ($$(art_target_or_host),target)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 340304a937..5567d15fa3 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -102,6 +102,7 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/gc/accounting/card_table_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
+ runtime/gc/reference_queue_test.cc \
runtime/gc/space/dlmalloc_space_base_test.cc \
runtime/gc/space/dlmalloc_space_static_test.cc \
runtime/gc/space/dlmalloc_space_random_test.cc \
@@ -171,14 +172,6 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/utils/arm64/managed_register_arm64_test.cc \
compiler/utils/x86/managed_register_x86_test.cc \
-ifeq ($(ART_SEA_IR_MODE),true)
-COMPILER_GTEST_COMMON_SRC_FILES += \
- compiler/utils/scoped_hashtable_test.cc \
- compiler/sea_ir/types/type_data_test.cc \
- compiler/sea_ir/types/type_inference_visitor_test.cc \
- compiler/sea_ir/ir/regions_test.cc
-endif
-
RUNTIME_GTEST_TARGET_SRC_FILES := \
$(RUNTIME_GTEST_COMMON_SRC_FILES)
@@ -197,9 +190,6 @@ COMPILER_GTEST_HOST_SRC_FILES := \
compiler/utils/x86_64/assembler_x86_64_test.cc
ART_TEST_CFLAGS :=
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- ART_TEST_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
-endif
include $(CLEAR_VARS)
LOCAL_MODULE := libart-gtest
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 9dbd2f4b45..d4fd5190a0 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -161,20 +161,11 @@ define create-core-oat-target-rules
# a dex2oat change to catch regressions early.
ifeq ($(ART_USE_OPTIMIZING_COMPILER), true)
core_dex2oat_dependency := $(DEX2OAT)
- ifeq ($($(3)TARGET_ARCH),arm64)
- # TODO: Enable image generation on arm64 once the backend
- # is on par with other architectures.
- core_compile_options += --compiler-filter=interpret-only
- endif
endif
ifeq ($(1),optimizing)
- ifeq ($($(3)TARGET_ARCH),arm64)
- core_compile_options += --compiler-filter=interpret-only
- else
- core_compile_options += --compiler-backend=Optimizing
- core_dex2oat_dependency := $(DEX2OAT)
- endif
+ core_compile_options += --compiler-backend=Optimizing
+ core_dex2oat_dependency := $(DEX2OAT)
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index a75417bcbc..8bcc2f99ec 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -84,7 +84,6 @@ LIBART_COMPILER_SRC_FILES := \
jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
jni/quick/jni_compiler.cc \
- llvm/llvm_compiler.cc \
optimizing/builder.cc \
optimizing/bounds_check_elimination.cc \
optimizing/code_generator.cc \
@@ -97,6 +96,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/graph_checker.cc \
optimizing/graph_visualizer.cc \
optimizing/gvn.cc \
+ optimizing/inliner.cc \
optimizing/instruction_simplifier.cc \
optimizing/locations.cc \
optimizing/nodes.cc \
@@ -137,38 +137,8 @@ LIBART_COMPILER_SRC_FILES := \
output_stream.cc \
vector_output_stream.cc
-ifeq ($(ART_SEA_IR_MODE),true)
-LIBART_COMPILER_SRC_FILES += \
- sea_ir/frontend.cc \
- sea_ir/ir/instruction_tools.cc \
- sea_ir/ir/sea.cc \
- sea_ir/code_gen/code_gen.cc \
- sea_ir/code_gen/code_gen_data.cc \
- sea_ir/types/type_inference.cc \
- sea_ir/types/type_inference_visitor.cc \
- sea_ir/debug/dot_gen.cc
-endif
-
LIBART_COMPILER_CFLAGS :=
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
-LIBART_COMPILER_SRC_FILES += \
- dex/portable/mir_to_gbc.cc \
- elf_writer_mclinker.cc \
- jni/portable/jni_compiler.cc \
- llvm/compiler_llvm.cc \
- llvm/gbc_expander.cc \
- llvm/generated/art_module.cc \
- llvm/intrinsic_helper.cc \
- llvm/ir_builder.cc \
- llvm/llvm_compilation_unit.cc \
- llvm/md_builder.cc \
- llvm/runtime_support_builder.cc \
- llvm/runtime_support_builder_arm.cc \
- llvm/runtime_support_builder_x86.cc
-LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
-endif
-
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
dex/quick/arm/arm_lir.h \
dex/quick/arm64/arm64_lir.h \
@@ -249,28 +219,6 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endif
endif
- ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- LOCAL_SHARED_LIBRARIES += libLLVM
- LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
- ifeq ($$(art_target_or_host),target)
- LOCAL_STATIC_LIBRARIES_arm += libmcldARMInfo libmcldARMTarget
- LOCAL_STATIC_LIBRARIES_x86 += libmcldX86Info libmcldX86Target
- LOCAL_STATIC_LIBRARIES_x86_64 += libmcldX86Info libmcldX86Target
- LOCAL_STATIC_LIBRARIES_mips += libmcldMipsInfo libmcldMipsTarget
- ifeq ($(TARGET_ARCH),arm64)
- $$(info TODOAArch64: $$(LOCAL_PATH)/Android.mk Add Arm64 specific MCLinker libraries)
- endif # TARGET_ARCH != arm64
- include $(LLVM_DEVICE_BUILD_MK)
- else # host
- LOCAL_STATIC_LIBRARIES += libmcldARMInfo libmcldARMTarget
- LOCAL_STATIC_LIBRARIES += libmcldX86Info libmcldX86Target
- LOCAL_STATIC_LIBRARIES += libmcldMipsInfo libmcldMipsTarget
- include $(LLVM_HOST_BUILD_MK)
- endif
- LOCAL_STATIC_LIBRARIES += libmcldCore libmcldObject libmcldADT libmcldFragment libmcldTarget libmcldCodeGen libmcldLDVariant libmcldMC libmcldSupport libmcldLD libmcldScript
- include $(LLVM_GEN_INTRINSICS_MK)
- endif
-
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
ifeq ($$(art_target_or_host),host)
@@ -323,18 +271,3 @@ endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
$(eval $(call build-libart-compiler,target,debug))
endif
-
-# Rule to build /system/lib/libcompiler_rt.a
-# Usually static libraries are not installed on the device.
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
-ifeq ($(ART_BUILD_TARGET),true)
-# TODO: Move to external/compiler_rt
-$(eval $(call copy-one-file, $(call intermediates-dir-for,STATIC_LIBRARIES,libcompiler_rt,,)/libcompiler_rt.a, $(TARGET_OUT_SHARED_LIBRARIES)/libcompiler_rt.a))
-ifdef TARGET_2ND_ARCH
-$(eval $(call copy-one-file, $(call intermediates-dir-for,STATIC_LIBRARIES,libcompiler_rt,,,t)/libcompiler_rt.a, $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libcompiler_rt.a))
-endif
-
-$(DEX2OAT): $(TARGET_OUT_SHARED_LIBRARIES)/libcompiler_rt.a
-
-endif
-endif
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index a3d9a0bd6d..059a9eea50 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -49,50 +49,43 @@ void CommonCompilerTest::MakeExecutable(mirror::ArtMethod* method) {
}
if (compiled_method != nullptr) {
const std::vector<uint8_t>* code = compiled_method->GetQuickCode();
- const void* code_ptr;
- bool is_portable = (code == nullptr);
- if (!is_portable) {
- uint32_t code_size = code->size();
- CHECK_NE(0u, code_size);
- const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size();
- const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
- uint32_t mapping_table_offset = mapping_table.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
- const std::vector<uint8_t>& gc_map = *compiled_method->GetGcMap();
- uint32_t gc_map_offset = gc_map.empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size() + gc_map.size();
- OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
- compiled_method->GetFrameSizeInBytes(),
- compiled_method->GetCoreSpillMask(),
- compiled_method->GetFpSpillMask(), code_size);
-
- header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
- std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table.size() +
- gc_map.size();
- size_t code_offset = compiled_method->AlignCode(size - code_size);
- size_t padding = code_offset - (size - code_size);
- chunk->reserve(padding + size);
- chunk->resize(sizeof(method_header));
- memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
- chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
- chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
- chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
- chunk->insert(chunk->begin(), padding, 0);
- chunk->insert(chunk->end(), code->begin(), code->end());
- CHECK_EQ(padding + size, chunk->size());
- code_ptr = &(*chunk)[code_offset];
- } else {
- code = compiled_method->GetPortableCode();
- code_ptr = &(*code)[0];
- }
+ uint32_t code_size = code->size();
+ CHECK_NE(0u, code_size);
+ const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size();
+ const std::vector<uint8_t>& mapping_table = *compiled_method->GetMappingTable();
+ uint32_t mapping_table_offset = mapping_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
+ const std::vector<uint8_t>& gc_map = *compiled_method->GetGcMap();
+ uint32_t gc_map_offset = gc_map.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size() + gc_map.size();
+ OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
+ compiled_method->GetFrameSizeInBytes(),
+ compiled_method->GetCoreSpillMask(),
+ compiled_method->GetFpSpillMask(), code_size);
+
+ header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
+ std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
+ size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table.size() +
+ gc_map.size();
+ size_t code_offset = compiled_method->AlignCode(size - code_size);
+ size_t padding = code_offset - (size - code_size);
+ chunk->reserve(padding + size);
+ chunk->resize(sizeof(method_header));
+ memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
+ chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
+ chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
+ chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
+ chunk->insert(chunk->begin(), padding, 0);
+ chunk->insert(chunk->end(), code->begin(), code->end());
+ CHECK_EQ(padding + size, chunk->size());
+ const void* code_ptr = &(*chunk)[code_offset];
MakeExecutable(code_ptr, code->size());
const void* method_code = CompiledMethod::CodePointer(code_ptr,
compiled_method->GetInstructionSet());
LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
- class_linker_->SetEntryPointsToCompiledCode(method, method_code, is_portable);
+ class_linker_->SetEntryPointsToCompiledCode(method, method_code);
} else {
// No code? You must mean to go into the interpreter.
// Or the generic JNI...
@@ -155,7 +148,7 @@ void CommonCompilerTest::SetUp() {
}
// TODO: make selectable
- Compiler::Kind compiler_kind = kUsePortableCompiler ? Compiler::kPortable : Compiler::kQuick;
+ Compiler::Kind compiler_kind = Compiler::kQuick;
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 698bf3b670..060af723a7 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -22,33 +22,11 @@ namespace art {
CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
const std::vector<uint8_t>& quick_code)
: compiler_driver_(compiler_driver), instruction_set_(instruction_set),
- portable_code_(nullptr), quick_code_(nullptr) {
- SetCode(&quick_code, nullptr);
+ quick_code_(nullptr) {
+ SetCode(&quick_code);
}
-CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const std::string& elf_object, const std::string& symbol)
- : compiler_driver_(compiler_driver), instruction_set_(instruction_set),
- portable_code_(nullptr), quick_code_(nullptr), symbol_(symbol) {
- CHECK_NE(elf_object.size(), 0U);
- CHECK_NE(symbol.size(), 0U);
- std::vector<uint8_t> temp_code(elf_object.size());
- for (size_t i = 0; i < elf_object.size(); ++i) {
- temp_code[i] = elf_object[i];
- }
- // TODO: we shouldn't just shove ELF objects in as "code" but
- // change to have different kinds of compiled methods. This is
- // being deferred until we work on hybrid execution or at least
- // until we work on batch compilation.
- SetCode(nullptr, &temp_code);
-}
-
-void CompiledCode::SetCode(const std::vector<uint8_t>* quick_code,
- const std::vector<uint8_t>* portable_code) {
- if (portable_code != nullptr) {
- CHECK(!portable_code->empty());
- portable_code_ = compiler_driver_->DeduplicateCode(*portable_code);
- }
+void CompiledCode::SetCode(const std::vector<uint8_t>* quick_code) {
if (quick_code != nullptr) {
CHECK(!quick_code->empty());
quick_code_ = compiler_driver_->DeduplicateCode(*quick_code);
@@ -64,17 +42,8 @@ bool CompiledCode::operator==(const CompiledCode& rhs) const {
} else {
return std::equal(quick_code_->begin(), quick_code_->end(), rhs.quick_code_->begin());
}
- } else if (portable_code_ != nullptr) {
- if (rhs.portable_code_ == nullptr) {
- return false;
- } else if (portable_code_->size() != rhs.portable_code_->size()) {
- return false;
- } else {
- return std::equal(portable_code_->begin(), portable_code_->end(),
- rhs.portable_code_->begin());
- }
}
- return (rhs.quick_code_ == nullptr) && (rhs.portable_code_ == nullptr);
+ return (rhs.quick_code_ == nullptr);
}
uint32_t CompiledCode::AlignCode(uint32_t offset) const {
@@ -128,13 +97,8 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
}
}
-const std::string& CompiledCode::GetSymbol() const {
- CHECK_NE(0U, symbol_.size());
- return symbol_;
-}
-
const std::vector<uint32_t>& CompiledCode::GetOatdataOffsetsToCompliledCodeOffset() const {
- CHECK_NE(0U, oatdata_offsets_to_compiled_code_offset_.size()) << symbol_;
+ CHECK_NE(0U, oatdata_offsets_to_compiled_code_offset_.size());
return oatdata_offsets_to_compiled_code_offset_;
}
@@ -170,14 +134,13 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& stack_map)
: CompiledCode(driver, instruction_set, quick_code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
- mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
+ mapping_table_(nullptr),
vmap_table_(driver->DeduplicateVMapTable(stack_map)),
gc_map_(nullptr),
cfi_info_(nullptr),
@@ -202,32 +165,4 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
patches_() {
}
-// Constructs a CompiledMethod for the Portable compiler.
-CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set,
- const std::string& code, const std::vector<uint8_t>& gc_map,
- const std::string& symbol)
- : CompiledCode(driver, instruction_set, code, symbol),
- frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0),
- src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
- mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
- vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
- gc_map_(driver->DeduplicateGCMap(gc_map)),
- cfi_info_(nullptr),
- patches_() {
-}
-
-CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set,
- const std::string& code, const std::string& symbol)
- : CompiledCode(driver, instruction_set, code, symbol),
- frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0),
- src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
- mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
- vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
- gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
- cfi_info_(nullptr),
- patches_() {
-}
-
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 7f76eef682..d93db03806 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -40,23 +40,15 @@ class CompiledCode {
CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
const std::vector<uint8_t>& quick_code);
- // For Portable to supply an ELF object
- CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const std::string& elf_object, const std::string &symbol);
-
InstructionSet GetInstructionSet() const {
return instruction_set_;
}
- const std::vector<uint8_t>* GetPortableCode() const {
- return portable_code_;
- }
-
const std::vector<uint8_t>* GetQuickCode() const {
return quick_code_;
}
- void SetCode(const std::vector<uint8_t>* quick_code, const std::vector<uint8_t>* portable_code);
+ void SetCode(const std::vector<uint8_t>* quick_code);
bool operator==(const CompiledCode& rhs) const;
@@ -77,7 +69,6 @@ class CompiledCode {
static const void* CodePointer(const void* code_pointer,
InstructionSet instruction_set);
- const std::string& GetSymbol() const;
const std::vector<uint32_t>& GetOatdataOffsetsToCompliledCodeOffset() const;
void AddOatdataOffsetToCompliledCodeOffset(uint32_t offset);
@@ -86,15 +77,9 @@ class CompiledCode {
const InstructionSet instruction_set_;
- // The ELF image for portable.
- std::vector<uint8_t>* portable_code_;
-
// Used to store the PIC code for Quick.
std::vector<uint8_t>* quick_code_;
- // Used for the Portable ELF symbol name.
- const std::string symbol_;
-
// There are offsets from the oatdata symbol to where the offset to
// the compiled method will be found. These are computed by the
// OatWriter and then used by the ElfWriter to add relocations so
@@ -291,7 +276,6 @@ class CompiledMethod FINAL : public CompiledCode {
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table);
// Constructs a CompiledMethod for the QuickJniCompiler.
@@ -303,14 +287,6 @@ class CompiledMethod FINAL : public CompiledCode {
const uint32_t fp_spill_mask,
const std::vector<uint8_t>* cfi_info);
- // Constructs a CompiledMethod for the Portable compiler.
- CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const std::string& code,
- const std::vector<uint8_t>& gc_map, const std::string& symbol);
-
- // Constructs a CompiledMethod for the Portable JniCompiler.
- CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const std::string& code,
- const std::string& symbol);
-
~CompiledMethod() {}
size_t GetFrameSizeInBytes() const {
@@ -330,9 +306,8 @@ class CompiledMethod FINAL : public CompiledCode {
return *src_mapping_table_;
}
- const std::vector<uint8_t>& GetMappingTable() const {
- DCHECK(mapping_table_ != nullptr);
- return *mapping_table_;
+ std::vector<uint8_t> const* GetMappingTable() const {
+ return mapping_table_;
}
const std::vector<uint8_t>& GetVmapTable() const {
@@ -367,7 +342,7 @@ class CompiledMethod FINAL : public CompiledCode {
// For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
std::vector<uint8_t>* vmap_table_;
// For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
- // are live. For portable code, the key is a dalvik PC.
+ // are live.
std::vector<uint8_t>* gc_map_;
// For quick code, a FDE entry for the debug_frame section.
std::vector<uint8_t>* cfi_info_;
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index b9fcf5bab6..baa6688570 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -19,55 +19,10 @@
#include "base/logging.h"
#include "dex/quick/quick_compiler.h"
#include "driver/compiler_driver.h"
-#include "llvm/llvm_compiler.h"
#include "optimizing/optimizing_compiler.h"
namespace art {
-#ifdef ART_SEA_IR_MODE
-constexpr bool kCanUseSeaIR = true;
-#else
-constexpr bool kCanUseSeaIR = false;
-#endif
-
-extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
- uint32_t access_flags ATTRIBUTE_UNUSED,
- art::InvokeType invoke_type ATTRIBUTE_UNUSED,
- uint16_t class_def_idx ATTRIBUTE_UNUSED,
- uint32_t method_idx ATTRIBUTE_UNUSED,
- jobject class_loader ATTRIBUTE_UNUSED,
- const art::DexFile& dex_file ATTRIBUTE_UNUSED)
-#ifdef ART_SEA_IR_MODE
-; // NOLINT(whitespace/semicolon)
-#else
-{
- UNREACHABLE();
-}
-#endif
-
-
-CompiledMethod* Compiler::TryCompileWithSeaIR(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file) {
- bool use_sea = kCanUseSeaIR &&
- (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
- return nullptr;
-}
-
Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
switch (kind) {
case kQuick:
@@ -76,13 +31,6 @@ Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
case kOptimizing:
return CreateOptimizingCompiler(driver);
- case kPortable:
- {
- Compiler* compiler = CreateLLVMCompiler(driver);
- CHECK(compiler != nullptr) << "Portable compiler not compiled";
- return compiler;
- }
-
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
diff --git a/compiler/compiler.h b/compiler/compiler.h
index c2c15ff9cf..07e2fd611f 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -32,19 +32,11 @@ namespace mirror {
class ArtMethod;
}
-// Base class for compiler-specific thread-local storage for compiler worker threads
-class CompilerTls {
- public:
- CompilerTls() {}
- ~CompilerTls() {}
-};
-
class Compiler {
public:
enum Kind {
kQuick,
- kOptimizing,
- kPortable
+ kOptimizing
};
static Compiler* Create(CompilerDriver* driver, Kind kind);
@@ -64,14 +56,6 @@ class Compiler {
jobject class_loader,
const DexFile& dex_file) const = 0;
- static CompiledMethod* TryCompileWithSeaIR(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-
virtual CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file) const = 0;
@@ -92,15 +76,6 @@ class Compiler {
return maximum_compilation_time_before_warning_;
}
- virtual bool IsPortable() const {
- return false;
- }
-
- void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
- UNUSED(driver);
- UNUSED(filename);
- }
-
virtual void InitCompilationUnit(CompilationUnit& cu) const = 0;
virtual ~Compiler() {}
@@ -119,10 +94,6 @@ class Compiler {
return nullptr;
}
- virtual CompilerTls* CreateNewCompilerTls() {
- return nullptr;
- }
-
// Returns whether the method to compile is such a pathological case that
// it's not worth compiling.
static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 764a4cf08e..0407e323cb 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -297,6 +297,41 @@ class BBOptimizations : public PassME {
void Start(PassDataHolder* data) const;
};
+/**
+ * @class SuspendCheckElimination
+ * @brief Any simple BasicBlock optimization can be put here.
+ */
+class SuspendCheckElimination : public PassME {
+ public:
+ SuspendCheckElimination()
+ : PassME("SuspendCheckElimination", kTopologicalSortTraversal, "6_post_sce_cfg") {
+ }
+
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return c_unit->mir_graph->EliminateSuspendChecksGate();
+ }
+
+ bool Worker(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ return c_unit->mir_graph->EliminateSuspendChecks(bb);
+ }
+
+ void End(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->EliminateSuspendChecksEnd();
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index a3fe8ad038..7ff06a04cb 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -311,6 +311,34 @@ enum ExtendedMIROpcode {
// arg[0]: TypeSize (most other vector opcodes have this in vC)
kMirOpPackedArrayPut,
+ // @brief Multiply-add integer.
+ // vA: destination
+ // vB: multiplicand
+ // vC: multiplier
+ // arg[0]: addend
+ kMirOpMaddInt,
+
+ // @brief Multiply-subtract integer.
+ // vA: destination
+ // vB: multiplicand
+ // vC: multiplier
+ // arg[0]: minuend
+ kMirOpMsubInt,
+
+ // @brief Multiply-add long.
+ // vA: destination
+ // vB: multiplicand
+ // vC: multiplier
+ // arg[0]: addend
+ kMirOpMaddLong,
+
+ // @brief Multiply-subtract long.
+ // vA: destination
+ // vB: multiplicand
+ // vC: multiplier
+ // arg[0]: minuend
+ kMirOpMsubLong,
+
kMirOpLast,
};
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 205a5218f2..f7968c225a 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -120,6 +120,22 @@ void DexCompiler::Compile() {
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false);
break;
+ case Instruction::IGET_BOOLEAN:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN_QUICK, false);
+ break;
+
+ case Instruction::IGET_BYTE:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE_QUICK, false);
+ break;
+
+ case Instruction::IGET_CHAR:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR_QUICK, false);
+ break;
+
+ case Instruction::IGET_SHORT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT_QUICK, false);
+ break;
+
case Instruction::IPUT:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
break;
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 3f6231cb1f..dd8b4c8a3d 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -47,6 +47,7 @@ static uint32_t kCompilerOptimizerDisableFlags = 0 | // Disable specific optimi
// (1 << kTrackLiveTemps) |
// (1 << kSafeOptimizations) |
// (1 << kBBOpt) |
+ // (1 << kSuspendCheckElimination) |
// (1 << kMatch) |
// (1 << kPromoteCompilerTemps) |
// (1 << kSuppressExceptionEdges) |
@@ -68,8 +69,6 @@ static uint32_t kCompilerDebugFlags = 0 | // Enable debug/testing modes
// (1 << kDebugShowNops) |
// (1 << kDebugCountOpcodes) |
// (1 << kDebugDumpCheckStats) |
- // (1 << kDebugDumpBitcodeFile) |
- // (1 << kDebugVerifyBitcode) |
// (1 << kDebugShowSummaryMemoryUsage) |
// (1 << kDebugShowFilterStats) |
// (1 << kDebugTimings) |
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index bed3b97304..4266535053 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -46,6 +46,7 @@ enum opt_control_vector {
kTrackLiveTemps,
kSafeOptimizations,
kBBOpt,
+ kSuspendCheckElimination,
kMatch,
kPromoteCompilerTemps,
kBranchFusing,
@@ -69,8 +70,6 @@ enum debugControlVector {
kDebugShowNops,
kDebugCountOpcodes,
kDebugDumpCheckStats,
- kDebugDumpBitcodeFile,
- kDebugVerifyBitcode,
kDebugShowSummaryMemoryUsage,
kDebugShowFilterStats,
kDebugTimings,
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 5b7ac3ca1b..6704112281 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -897,6 +897,18 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
// 120 MirOpPackedArrayPut
DF_UB | DF_UC | DF_NULL_CHK_B | DF_RANGE_CHK_C | DF_REF_B | DF_CORE_C | DF_LVN,
+
+ // 121 MirOpMaddInt
+ DF_FORMAT_EXTENDED,
+
+ // 122 MirOpMsubInt
+ DF_FORMAT_EXTENDED,
+
+ // 123 MirOpMaddLong
+ DF_FORMAT_EXTENDED,
+
+ // 124 MirOpMsubLong
+ DF_FORMAT_EXTENDED,
};
/* Return the base virtual register for a SSA name */
@@ -906,7 +918,7 @@ int MIRGraph::SRegToVReg(int ssa_reg) const {
/* Any register that is used before being defined is considered live-in */
void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
- ArenaBitVector* live_in_v, int dalvik_reg_id) {
+ ArenaBitVector* live_in_v, int dalvik_reg_id) {
use_v->SetBit(dalvik_reg_id);
if (!def_v->IsBitSet(dalvik_reg_id)) {
live_in_v->SetBit(dalvik_reg_id);
@@ -919,8 +931,8 @@ void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id) {
}
void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
- ArenaBitVector* live_in_v,
- const MIR::DecodedInstruction& d_insn) {
+ ArenaBitVector* live_in_v,
+ const MIR::DecodedInstruction& d_insn) {
// For vector MIRs, vC contains type information
bool is_vector_type_wide = false;
int type_size = d_insn.vC >> 16;
@@ -951,6 +963,24 @@ void MIRGraph::HandleExtended(ArenaBitVector* use_v, ArenaBitVector* def_v,
HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
}
break;
+ case kMirOpMaddInt:
+ case kMirOpMsubInt:
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
+ HandleDef(def_v, d_insn.vA);
+ break;
+ case kMirOpMaddLong:
+ case kMirOpMsubLong:
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vB + 1);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.vC + 1);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0]);
+ HandleLiveInUse(use_v, def_v, live_in_v, d_insn.arg[0] + 1);
+ HandleDef(def_v, d_insn.vA);
+ HandleDef(def_v, d_insn.vA + 1);
+ break;
default:
LOG(ERROR) << "Unexpected Extended Opcode " << d_insn.opcode;
break;
@@ -1139,6 +1169,28 @@ void MIRGraph::DataFlowSSAFormatExtended(MIR* mir) {
HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
}
break;
+ case kMirOpMaddInt:
+ case kMirOpMsubInt:
+ AllocateSSAUseData(mir, 3);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 1);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 2);
+ AllocateSSADefData(mir, 1);
+ HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
+ break;
+ case kMirOpMaddLong:
+ case kMirOpMsubLong:
+ AllocateSSAUseData(mir, 6);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vB, 0);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vB + 1, 1);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vC, 2);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.vC + 1, 3);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0], 4);
+ HandleSSAUse(mir->ssa_rep->uses, d_insn.arg[0] + 1, 5);
+ AllocateSSADefData(mir, 2);
+ HandleSSADef(mir->ssa_rep->defs, d_insn.vA, 0);
+ HandleSSADef(mir->ssa_rep->defs, d_insn.vA + 1, 1);
+ break;
default:
LOG(ERROR) << "Missing case for extended MIR: " << mir->dalvikInsn.opcode;
break;
@@ -1343,7 +1395,7 @@ void MIRGraph::CompilerInitializeSSAConversion() {
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-void MIRGraph::CountUses(class BasicBlock* bb) {
+void MIRGraph::CountUses(BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b17f5064d5..71ad635ac4 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -70,6 +70,10 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
"MemBarrier",
"PackedArrayGet",
"PackedArrayPut",
+ "MaddInt",
+ "MsubInt",
+ "MaddLong",
+ "MsubLong",
};
MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
@@ -127,7 +131,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
ifield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
- gen_suspend_test_list_(arena->Adapter()) {
+ suspend_checks_in_loops_(nullptr) {
memset(&temp_, 0, sizeof(temp_));
use_counts_.reserve(256);
raw_use_counts_.reserve(256);
@@ -1386,6 +1390,27 @@ void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir
}
FillTypeSizeString(mir->dalvikInsn.arg[0], decoded_mir);
break;
+ case kMirOpMaddInt:
+ case kMirOpMsubInt:
+ case kMirOpMaddLong:
+ case kMirOpMsubLong:
+ if (ssa_rep != nullptr) {
+ decoded_mir->append(" ");
+ decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
+ if (defs > 1) {
+ decoded_mir->append(", ");
+ decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
+ }
+ for (int i = 0; i < uses; i++) {
+ decoded_mir->append(", ");
+ decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
+ }
+ } else {
+ decoded_mir->append(StringPrintf(" v%d, v%d, v%d, v%d",
+ mir->dalvikInsn.vA, mir->dalvikInsn.vB,
+ mir->dalvikInsn.vC, mir->dalvikInsn.arg[0]));
+ }
+ break;
default:
break;
}
@@ -1947,6 +1972,7 @@ void MIRGraph::ComputeTopologicalSortOrder() {
DCHECK_EQ(bb->hidden, false);
DCHECK_EQ(bb->visited, false);
bb->visited = true;
+ bb->nesting_depth = loop_head_stack.size();
// Now add the basic block.
uint16_t idx = static_cast<uint16_t>(topological_order_.size());
@@ -1988,24 +2014,6 @@ bool BasicBlock::IsExceptionBlock() const {
return false;
}
-bool MIRGraph::HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id) {
- BasicBlock* target = GetBasicBlock(target_id);
-
- if (source == nullptr || target == nullptr)
- return false;
-
- int idx;
- for (idx = gen_suspend_test_list_.size() - 1; idx >= 0; idx--) {
- BasicBlock* bb = gen_suspend_test_list_[idx];
- if (bb == source)
- return true; // The block has been inserted by a suspend check before.
- if (source->dominators->IsBitSet(bb->id) && bb->dominators->IsBitSet(target_id))
- return true;
- }
-
- return false;
-}
-
ChildBlockIterator::ChildBlockIterator(BasicBlock* bb, MIRGraph* mir_graph)
: basic_block_(bb), mir_graph_(mir_graph), visited_fallthrough_(false),
visited_taken_(false), have_successors_(false) {
@@ -2476,6 +2484,11 @@ int MIR::DecodedInstruction::FlagsOf() const {
return Instruction::kContinue | Instruction::kThrow;
case kMirOpPackedArrayPut:
return Instruction::kContinue | Instruction::kThrow;
+ case kMirOpMaddInt:
+ case kMirOpMsubInt:
+ case kMirOpMaddLong:
+ case kMirOpMsubLong:
+ return Instruction::kContinue;
default:
LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int> (opcode);
return 0;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index db1cf4ba00..851ca150b5 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -34,6 +34,7 @@
namespace art {
+class DexFileMethodInliner;
class GlobalValueNumbering;
enum DataFlowAttributePos {
@@ -131,11 +132,9 @@ enum DataFlowAttributePos {
enum OatMethodAttributes {
kIsLeaf, // Method is leaf.
- kHasLoop, // Method contains simple loop.
};
#define METHOD_IS_LEAF (1 << kIsLeaf)
-#define METHOD_HAS_LOOP (1 << kHasLoop)
// Minimum field size to contain Dalvik v_reg number.
#define VREG_NUM_WIDTH 16
@@ -731,6 +730,10 @@ class MIRGraph {
return max_nested_loops_;
}
+ bool IsLoopHead(BasicBlockId bb_id) {
+ return topological_order_loop_ends_[topological_order_indexes_[bb_id]] != 0u;
+ }
+
bool IsConst(int32_t s_reg) const {
return is_constant_v_->IsBitSet(s_reg);
}
@@ -969,13 +972,23 @@ class MIRGraph {
return reg_location_[method_sreg_];
}
- bool IsBackedge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
- return ((target_bb_id != NullBasicBlockId) &&
- (GetBasicBlock(target_bb_id)->start_offset <= branch_bb->start_offset));
+ bool IsBackEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+ DCHECK_NE(target_bb_id, NullBasicBlockId);
+ DCHECK_LT(target_bb_id, topological_order_indexes_.size());
+ DCHECK_LT(branch_bb->id, topological_order_indexes_.size());
+ return topological_order_indexes_[target_bb_id] <= topological_order_indexes_[branch_bb->id];
}
- bool IsBackwardsBranch(BasicBlock* branch_bb) {
- return IsBackedge(branch_bb, branch_bb->taken) || IsBackedge(branch_bb, branch_bb->fall_through);
+ bool IsSuspendCheckEdge(BasicBlock* branch_bb, BasicBlockId target_bb_id) {
+ if (!IsBackEdge(branch_bb, target_bb_id)) {
+ return false;
+ }
+ if (suspend_checks_in_loops_ == nullptr) {
+ // We didn't run suspend check elimination.
+ return true;
+ }
+ uint16_t target_depth = GetBasicBlock(target_bb_id)->nesting_depth;
+ return (suspend_checks_in_loops_[branch_bb->id] & (1u << (target_depth - 1u))) == 0;
}
void CountBranch(DexOffset target_offset) {
@@ -1055,6 +1068,9 @@ class MIRGraph {
bool ApplyGlobalValueNumberingGate();
bool ApplyGlobalValueNumbering(BasicBlock* bb);
void ApplyGlobalValueNumberingEnd();
+ bool EliminateSuspendChecksGate();
+ bool EliminateSuspendChecks(BasicBlock* bb);
+ void EliminateSuspendChecksEnd();
uint16_t GetGvnIFieldId(MIR* mir) const {
DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
@@ -1166,7 +1182,7 @@ class MIRGraph {
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(class BasicBlock* bb);
+ void CountUses(BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1210,20 +1226,6 @@ class MIRGraph {
void HandleSSADef(int* defs, int dalvik_reg, int reg_index);
bool InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed);
- // Used for removing redudant suspend tests
- void AppendGenSuspendTestList(BasicBlock* bb) {
- if (gen_suspend_test_list_.size() == 0 ||
- gen_suspend_test_list_.back() != bb) {
- gen_suspend_test_list_.push_back(bb);
- }
- }
-
- /* This is used to check if there is already a method call dominating the
- * source basic block of a backedge and being dominated by the target basic
- * block of the backedge.
- */
- bool HasSuspendTestBetween(BasicBlock* source, BasicBlockId target_id);
-
protected:
int FindCommonParent(int block1, int block2);
void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
@@ -1263,6 +1265,34 @@ class MIRGraph {
void ComputeDomPostOrderTraversal(BasicBlock* bb);
int GetSSAUseCount(int s_reg);
bool BasicBlockOpt(BasicBlock* bb);
+ void MultiplyAddOpt(BasicBlock* bb);
+
+ /**
+ * @brief Check whether the given MIR is possible to throw an exception.
+ * @param mir The mir to check.
+ * @return Returns 'true' if the given MIR might throw an exception.
+ */
+ bool CanThrow(MIR* mir);
+ /**
+ * @brief Combine multiply and add/sub MIRs into corresponding extended MAC MIR.
+ * @param mul_mir The multiply MIR to be combined.
+ * @param add_mir The add/sub MIR to be combined.
+ * @param mul_is_first_addend 'true' if multiply product is the first addend of add operation.
+ * @param is_wide 'true' if the operations are long type.
+ * @param is_sub 'true' if it is a multiply-subtract operation.
+ */
+ void CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
+ bool is_wide, bool is_sub);
+ /*
+ * @brief Check whether the first MIR anti-depends on the second MIR.
+ * @details To check whether one of first MIR's uses of vregs is redefined by the second MIR,
+ * i.e. there is a write-after-read dependency.
+ * @param first The first MIR.
+ * @param second The second MIR.
+ * @param Returns true if there is a write-after-read dependency.
+ */
+ bool HasAntiDependency(MIR* first, MIR* second);
+
bool BuildExtendedBBList(class BasicBlock* bb);
bool FillDefBlockMatrix(BasicBlock* bb);
void InitializeDominationInfo(BasicBlock* bb);
@@ -1339,6 +1369,10 @@ class MIRGraph {
uint16_t* ifield_ids_; // Part of GVN/LVN but cached here for LVN to avoid recalculation.
uint16_t* sfield_ids_; // Ditto.
} gvn;
+ // Suspend check elimination.
+ struct {
+ DexFileMethodInliner* inliner;
+ } sce;
} temp_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
@@ -1374,11 +1408,19 @@ class MIRGraph {
ArenaVector<MirIFieldLoweringInfo> ifield_lowering_infos_;
ArenaVector<MirSFieldLoweringInfo> sfield_lowering_infos_;
ArenaVector<MirMethodLoweringInfo> method_lowering_infos_;
+
+ // In the suspend check elimination pass we determine for each basic block and enclosing
+ // loop whether there's guaranteed to be a suspend check on the path from the loop head
+ // to this block. If so, we can eliminate the back-edge suspend check.
+ // The bb->id is index into suspend_checks_in_loops_ and the loop head's depth is bit index
+ // in a suspend_checks_in_loops_[bb->id].
+ uint32_t* suspend_checks_in_loops_;
+
static const uint64_t oat_data_flow_attributes_[kMirOpLast];
- ArenaVector<BasicBlock*> gen_suspend_test_list_; // List of blocks containing suspend tests
friend class MirOptimizationTest;
friend class ClassInitCheckEliminationTest;
+ friend class SuspendCheckEliminationTest;
friend class NullCheckEliminationTest;
friend class GlobalValueNumberingTest;
friend class LocalValueNumberingTest;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index fd4c3d7d77..6e9844cb7f 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -426,6 +426,10 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
if (bb->block_type == kDead) {
return true;
}
+ // Currently multiply-accumulate backend supports are only available on arm32 and arm64.
+ if (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2) {
+ MultiplyAddOpt(bb);
+ }
bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kLocalValueNumbering)) == 0u;
std::unique_ptr<ScopedArenaAllocator> allocator;
std::unique_ptr<GlobalValueNumbering> global_valnum;
@@ -542,36 +546,13 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
}
break;
- case Instruction::RETURN_VOID:
- case Instruction::RETURN:
- case Instruction::RETURN_WIDE:
- case Instruction::RETURN_OBJECT:
- if (bb->GetFirstNonPhiInsn() == mir) {
- // This is a simple return BB. Eliminate suspend checks on predecessor back-edges.
- for (BasicBlockId pred_id : bb->predecessors) {
- BasicBlock* pred_bb = GetBasicBlock(pred_id);
- DCHECK(pred_bb != nullptr);
- if (IsBackedge(pred_bb, bb->id) && pred_bb->last_mir_insn != nullptr &&
- (IsInstructionIfCc(pred_bb->last_mir_insn->dalvikInsn.opcode) ||
- IsInstructionIfCcZ(pred_bb->last_mir_insn->dalvikInsn.opcode) ||
- IsInstructionGoto(pred_bb->last_mir_insn->dalvikInsn.opcode))) {
- pred_bb->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
- if (cu_->verbose) {
- LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
- << pred_bb->last_mir_insn->offset;
- }
- }
- }
- }
- break;
default:
break;
}
// Is this the select pattern?
// TODO: flesh out support for Mips. NOTE: llvm's select op doesn't quite work here.
// TUNING: expand to support IF_xx compare & branches
- if (!cu_->compiler->IsPortable() &&
- (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
+ if ((cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
@@ -592,12 +573,8 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
(Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
/*
- * Okay - we have the basic diamond shape. At the very least, we can eliminate the
- * suspend check on the taken-taken branch back to the join point.
+ * Okay - we have the basic diamond shape.
*/
- if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
- tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
- }
// TODO: Add logic for LONG.
// Are the block bodies something we can handle?
@@ -1636,4 +1613,304 @@ void MIRGraph::BasicBlockOptimization() {
temp_scoped_alloc_.reset();
}
+bool MIRGraph::EliminateSuspendChecksGate() {
+ if ((cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
+ GetMaxNestedLoops() == 0u || // Nothing to do.
+ GetMaxNestedLoops() >= 32u || // Only 32 bits in suspend_checks_in_loops_[.].
+ // Exclude 32 as well to keep bit shifts well-defined.
+ !HasInvokes()) { // No invokes to actually eliminate any suspend checks.
+ return false;
+ }
+ if (cu_->compiler_driver != nullptr && cu_->compiler_driver->GetMethodInlinerMap() != nullptr) {
+ temp_.sce.inliner =
+ cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
+ }
+ suspend_checks_in_loops_ = static_cast<uint32_t*>(
+ arena_->Alloc(GetNumBlocks() * sizeof(*suspend_checks_in_loops_), kArenaAllocMisc));
+ return true;
+}
+
+bool MIRGraph::EliminateSuspendChecks(BasicBlock* bb) {
+ if (bb->block_type != kDalvikByteCode) {
+ return false;
+ }
+ DCHECK_EQ(GetTopologicalSortOrderLoopHeadStack()->size(), bb->nesting_depth);
+ if (bb->nesting_depth == 0u) {
+ // Out of loops.
+ DCHECK_EQ(suspend_checks_in_loops_[bb->id], 0u); // The array was zero-initialized.
+ return false;
+ }
+ uint32_t suspend_checks_in_loops = (1u << bb->nesting_depth) - 1u; // Start with all loop heads.
+ bool found_invoke = false;
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (IsInstructionInvoke(mir->dalvikInsn.opcode) &&
+ (temp_.sce.inliner == nullptr ||
+ !temp_.sce.inliner->IsIntrinsic(mir->dalvikInsn.vB, nullptr))) {
+ // Non-intrinsic invoke, rely on a suspend point in the invoked method.
+ found_invoke = true;
+ break;
+ }
+ }
+ if (!found_invoke) {
+ // Intersect suspend checks from predecessors.
+ uint16_t bb_topo_idx = topological_order_indexes_[bb->id];
+ uint32_t pred_mask_union = 0u;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ uint16_t pred_topo_idx = topological_order_indexes_[pred_id];
+ if (pred_topo_idx < bb_topo_idx) {
+ // Determine the loop depth of the predecessors relative to this block.
+ size_t pred_loop_depth = topological_order_loop_head_stack_.size();
+ while (pred_loop_depth != 0u &&
+ pred_topo_idx < topological_order_loop_head_stack_[pred_loop_depth - 1].first) {
+ --pred_loop_depth;
+ }
+ DCHECK_LE(pred_loop_depth, GetBasicBlock(pred_id)->nesting_depth);
+ uint32_t pred_mask = (1u << pred_loop_depth) - 1u;
+ // Intersect pred_mask bits in suspend_checks_in_loops with
+ // suspend_checks_in_loops_[pred_id].
+ uint32_t pred_loops_without_checks = pred_mask & ~suspend_checks_in_loops_[pred_id];
+ suspend_checks_in_loops = suspend_checks_in_loops & ~pred_loops_without_checks;
+ pred_mask_union |= pred_mask;
+ }
+ }
+ DCHECK_EQ(((1u << (IsLoopHead(bb->id) ? bb->nesting_depth - 1u: bb->nesting_depth)) - 1u),
+ pred_mask_union);
+ suspend_checks_in_loops &= pred_mask_union;
+ }
+ suspend_checks_in_loops_[bb->id] = suspend_checks_in_loops;
+ if (suspend_checks_in_loops == 0u) {
+ return false;
+ }
+ // Apply MIR_IGNORE_SUSPEND_CHECK if appropriate.
+ if (bb->taken != NullBasicBlockId) {
+ DCHECK(bb->last_mir_insn != nullptr);
+ DCHECK(IsInstructionIfCc(bb->last_mir_insn->dalvikInsn.opcode) ||
+ IsInstructionIfCcZ(bb->last_mir_insn->dalvikInsn.opcode) ||
+ IsInstructionGoto(bb->last_mir_insn->dalvikInsn.opcode) ||
+ (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) >= kMirOpFusedCmplFloat &&
+ static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) <= kMirOpFusedCmpLong));
+ if (!IsSuspendCheckEdge(bb, bb->taken) &&
+ (bb->fall_through == NullBasicBlockId || !IsSuspendCheckEdge(bb, bb->fall_through))) {
+ bb->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
+ }
+ } else if (bb->fall_through != NullBasicBlockId && IsSuspendCheckEdge(bb, bb->fall_through)) {
+ // We've got a fall-through suspend edge. Add an artificial GOTO to force suspend check.
+ MIR* mir = NewMIR();
+ mir->dalvikInsn.opcode = Instruction::GOTO;
+ mir->dalvikInsn.vA = 0; // Branch offset.
+ mir->offset = GetBasicBlock(bb->fall_through)->start_offset;
+ mir->m_unit_index = current_method_;
+ mir->ssa_rep = reinterpret_cast<SSARepresentation*>(
+ arena_->Alloc(sizeof(SSARepresentation), kArenaAllocDFInfo)); // Zero-initialized.
+ bb->AppendMIR(mir);
+ std::swap(bb->fall_through, bb->taken); // The fall-through has become taken.
+ }
+ return true;
+}
+
+void MIRGraph::EliminateSuspendChecksEnd() {
+ temp_.sce.inliner = nullptr;
+}
+
+bool MIRGraph::CanThrow(MIR* mir) {
+ if ((mir->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
+ return false;
+ }
+ const int opt_flags = mir->optimization_flags;
+ uint64_t df_attributes = GetDataFlowAttributes(mir);
+
+ if (((df_attributes & DF_HAS_NULL_CHKS) != 0) && ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
+ return true;
+ }
+ if ((df_attributes & DF_IFIELD) != 0) {
+ // The IGET/IPUT family.
+ const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(mir);
+ bool fast = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
+ // Already processed null check above.
+ if (fast) {
+ return false;
+ }
+ } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
+ // The AGET/APUT family.
+ // Already processed null check above.
+ if ((opt_flags & MIR_IGNORE_RANGE_CHECK) != 0) {
+ return false;
+ }
+ } else if ((df_attributes & DF_SFIELD) != 0) {
+ // The SGET/SPUT family.
+ const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
+ bool fast = (df_attributes & DF_DA) ? field_info.FastGet() : field_info.FastPut();
+ bool is_class_initialized = field_info.IsClassInitialized() ||
+ ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0);
+ if (fast && is_class_initialized) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool MIRGraph::HasAntiDependency(MIR* first, MIR* second) {
+ DCHECK(first->ssa_rep != nullptr);
+ DCHECK(second->ssa_rep != nullptr);
+ if ((second->ssa_rep->num_defs > 0) && (first->ssa_rep->num_uses > 0)) {
+ int vreg0 = SRegToVReg(second->ssa_rep->defs[0]);
+ int vreg1 = (second->ssa_rep->num_defs == 2) ?
+ SRegToVReg(second->ssa_rep->defs[1]) : INVALID_VREG;
+ for (int i = 0; i < first->ssa_rep->num_uses; i++) {
+ int32_t use = SRegToVReg(first->ssa_rep->uses[i]);
+ if (use == vreg0 || use == vreg1) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void MIRGraph::CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
+ bool is_wide, bool is_sub) {
+ if (is_wide) {
+ if (is_sub) {
+ add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubLong);
+ } else {
+ add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddLong);
+ }
+ } else {
+ if (is_sub) {
+ add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubInt);
+ } else {
+ add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddInt);
+ }
+ }
+ add_mir->ssa_rep->num_uses = is_wide ? 6 : 3;
+ int32_t addend0 = INVALID_SREG;
+ int32_t addend1 = INVALID_SREG;
+ if (is_wide) {
+ addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[2] : add_mir->ssa_rep->uses[0];
+ addend1 = mul_is_first_addend ? add_mir->ssa_rep->uses[3] : add_mir->ssa_rep->uses[1];
+ } else {
+ addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[1] : add_mir->ssa_rep->uses[0];
+ }
+
+ AllocateSSAUseData(add_mir, add_mir->ssa_rep->num_uses);
+ add_mir->ssa_rep->uses[0] = mul_mir->ssa_rep->uses[0];
+ add_mir->ssa_rep->uses[1] = mul_mir->ssa_rep->uses[1];
+ // Clear the original multiply product ssa use count, as it is not used anymore.
+ raw_use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
+ use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
+ if (is_wide) {
+ DCHECK_EQ(add_mir->ssa_rep->num_uses, 6);
+ add_mir->ssa_rep->uses[2] = mul_mir->ssa_rep->uses[2];
+ add_mir->ssa_rep->uses[3] = mul_mir->ssa_rep->uses[3];
+ add_mir->ssa_rep->uses[4] = addend0;
+ add_mir->ssa_rep->uses[5] = addend1;
+ raw_use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
+ use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
+ } else {
+ DCHECK_EQ(add_mir->ssa_rep->num_uses, 3);
+ add_mir->ssa_rep->uses[2] = addend0;
+ }
+ // Copy in the decoded instruction information.
+ add_mir->dalvikInsn.vB = SRegToVReg(add_mir->ssa_rep->uses[0]);
+ if (is_wide) {
+ add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[2]);
+ add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[4]);
+ } else {
+ add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[1]);
+ add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[2]);
+ }
+ // Original multiply MIR is set to Nop.
+ mul_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+}
+
+void MIRGraph::MultiplyAddOpt(BasicBlock* bb) {
+ if (bb->block_type == kDead) {
+ return;
+ }
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+ ScopedArenaSafeMap<uint32_t, MIR*> ssa_mul_map(std::less<uint32_t>(), allocator.Adapter());
+ ScopedArenaSafeMap<uint32_t, MIR*>::iterator map_it;
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ Instruction::Code opcode = mir->dalvikInsn.opcode;
+ bool is_sub = true;
+ bool is_candidate_multiply = false;
+ switch (opcode) {
+ case Instruction::MUL_INT:
+ case Instruction::MUL_INT_2ADDR:
+ is_candidate_multiply = true;
+ break;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ if (cu_->target64) {
+ is_candidate_multiply = true;
+ }
+ break;
+ case Instruction::ADD_INT:
+ case Instruction::ADD_INT_2ADDR:
+ is_sub = false;
+ FALLTHROUGH_INTENDED;
+ case Instruction::SUB_INT:
+ case Instruction::SUB_INT_2ADDR:
+ if (((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end()) && !is_sub) {
+ // a*b+c
+ CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
+ false /* is_wide */, false /* is_sub */);
+ ssa_mul_map.erase(mir->ssa_rep->uses[0]);
+ } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[1])) != ssa_mul_map.end()) {
+ // c+a*b or c-a*b
+ CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
+ false /* is_wide */, is_sub);
+ ssa_mul_map.erase(map_it);
+ }
+ break;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ is_sub = false;
+ FALLTHROUGH_INTENDED;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ if (!cu_->target64) {
+ break;
+ }
+ if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end() && !is_sub) {
+ // a*b+c
+ CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
+ true /* is_wide */, false /* is_sub */);
+ ssa_mul_map.erase(map_it);
+ } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[2])) != ssa_mul_map.end()) {
+ // c+a*b or c-a*b
+ CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
+ true /* is_wide */, is_sub);
+ ssa_mul_map.erase(map_it);
+ }
+ break;
+ default:
+ if (!ssa_mul_map.empty() && CanThrow(mir)) {
+        // Should not combine multiply and add MIRs across a potential exception.
+ ssa_mul_map.clear();
+ }
+ break;
+ }
+
+    // Exclude the case where an MIR writes a vreg that is used by a previous candidate multiply
+    // MIR. The current RA may allocate the same physical register to both, in which case the
+    // multiplier would have been overwritten and the stale value must not be fed into the
+    // multiply-add insn.
+ if (ssa_mul_map.size() > 0) {
+ for (auto it = ssa_mul_map.begin(); it != ssa_mul_map.end();) {
+ MIR* mul = it->second;
+ if (HasAntiDependency(mul, mir)) {
+ it = ssa_mul_map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ if (is_candidate_multiply &&
+ (GetRawUseCount(mir->ssa_rep->defs[0]) == 1) && (mir->next != nullptr)) {
+ ssa_mul_map.Put(mir->ssa_rep->defs[0], mir);
+ }
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index c794cc61f8..6c2e9c0b27 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -88,6 +88,8 @@ class MirOptimizationTest : public testing::Test {
{ bb, opcode, 0u, vA, vB, vC }
#define DEF_INVOKE(bb, opcode, vC, method_info) \
{ bb, opcode, method_info, 0u, 0u, vC }
+#define DEF_OTHER0(bb, opcode) \
+ { bb, opcode, 0u, 0u, 0u, 0u }
#define DEF_OTHER1(bb, opcode, vA) \
{ bb, opcode, 0u, vA, 0u, 0u }
#define DEF_OTHER2(bb, opcode, vA, vB) \
@@ -175,6 +177,56 @@ class MirOptimizationTest : public testing::Test {
PrepareBasicBlocks(bbs);
}
+ void PrepareNestedLoopsWhile_While() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(8)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED2(3, 7)), // Outer while loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)), // Inner while loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)), // "taken" loops to inner head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(5)), // "taken" loops to outer head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareNestedLoopsWhile_WhileWhile() {
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(10)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 10), DEF_PRED2(3, 9)), // Outer while loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)), // Inner while loop head 1.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(5)), // Loops to inner head 1.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(5, 8)), // Inner while loop head 2.
+      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED1(7)),            // Loops to inner head 2.
+      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(7)),            // Loops to outer head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
+ void PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge() {
+ // Extra edge from the first inner loop body to second inner loop body (6u->8u).
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(10)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 10), DEF_PRED2(3, 9)), // Outer while loop head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(6, 7), DEF_PRED2(4, 6)), // Inner while loop head 1.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 8), DEF_PRED1(5)), // Loops to inner head 1.
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(8, 9), DEF_PRED2(5, 8)), // Inner while loop head 2.
+      DEF_BB(kDalvikByteCode, DEF_SUCC1(7), DEF_PRED2(7, 6)),         // Loops to inner head 2.
+      DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(7)),            // Loops to outer head.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ PrepareBasicBlocks(bbs);
+ }
+
void PrepareCatch() {
static const BBDef bbs[] = {
DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
@@ -397,6 +449,43 @@ class NullCheckEliminationTest : public MirOptimizationTest {
}
};
+class SuspendCheckEliminationTest : public MirOptimizationTest {
+ protected:
+ bool IsBackEdge(BasicBlockId branch_bb, BasicBlockId target_bb) {
+ BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
+ return target_bb != NullBasicBlockId && cu_.mir_graph->IsBackEdge(branch, target_bb);
+ }
+
+ bool IsSuspendCheckEdge(BasicBlockId branch_bb, BasicBlockId target_bb) {
+ BasicBlock* branch = cu_.mir_graph->GetBasicBlock(branch_bb);
+ return cu_.mir_graph->IsSuspendCheckEdge(branch, target_bb);
+ }
+
+ void PerformSuspendCheckElimination() {
+ cu_.mir_graph->SSATransformationStart();
+ cu_.mir_graph->ComputeDFSOrders();
+ cu_.mir_graph->ComputeDominators();
+ cu_.mir_graph->ComputeTopologicalSortOrder();
+ cu_.mir_graph->SSATransformationEnd();
+ bool gate_result = cu_.mir_graph->EliminateSuspendChecksGate();
+ ASSERT_TRUE(gate_result);
+ TopologicalSortIterator iterator(cu_.mir_graph.get());
+ bool change = false;
+ for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
+ change = cu_.mir_graph->EliminateSuspendChecks(bb);
+ }
+ cu_.mir_graph->EliminateSuspendChecksEnd();
+ }
+
+ SuspendCheckEliminationTest()
+ : MirOptimizationTest() {
+ static const MethodDef methods[] = {
+ { 0u, 1u, 0u, 0u, kDirect, kDirect, false, false }, // Dummy.
+ };
+ PrepareMethods(methods);
+ }
+};
+
TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
static const SFieldDef sfields[] = {
{ 0u, 1u, 0u, 0u, kDexMemAccessWord },
@@ -882,7 +971,208 @@ TEST_F(NullCheckEliminationTest, Catch) {
}
}
-// Undefine MIR_DEF for null check elimination.
-#undef MIR_DEF
+TEST_F(SuspendCheckEliminationTest, LoopNoElimination) {
+ static const MIRDef mirs[] = {
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u, 0u), // Force the pass to run.
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge back.
+ };
+
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(4u, 4u));
+ EXPECT_TRUE(IsSuspendCheckEdge(4u, 4u)); // Suspend point on loop to self.
+}
+
+TEST_F(SuspendCheckEliminationTest, LoopElimination) {
+ static const MIRDef mirs[] = {
+ DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in the loop.
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge back.
+ };
+
+ PrepareLoop();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(4u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(4u, 4u)); // No suspend point on loop to self.
+}
+
+TEST_F(SuspendCheckEliminationTest, While_While_NoElimination) {
+ static const MIRDef mirs[] = {
+ DEF_INVOKE(3u, Instruction::INVOKE_STATIC, 0u, 0u), // Force the pass to run.
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER0(7u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_While();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(7u, 4u));
+ EXPECT_TRUE(IsSuspendCheckEdge(7u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_While_InvokeInOuterLoopHead) {
+ static const MIRDef mirs[] = {
+ DEF_INVOKE(4u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in outer loop head.
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER0(7u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_While();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(7u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_While_InvokeInOuterLoopBody) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_INVOKE(7u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in outer loop body.
+ DEF_OTHER0(7u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_While();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(7u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_While_InvokeInInnerLoopHead) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in inner loop head.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER0(7u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_While();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(7u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(7u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_While_InvokeInInnerLoopBody) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop.
+ DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in inner loop body.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER0(7u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_While();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(7u, 4u));
+ EXPECT_TRUE(IsSuspendCheckEdge(7u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_WhileWhile_InvokeInFirstInnerLoopHead) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_INVOKE(5u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in first inner loop head.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 1.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER1(7u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 2.
+ DEF_OTHER0(8u, Instruction::GOTO), // Edge back to inner loop 2 head.
+ DEF_OTHER0(9u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_WhileWhile();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(8u, 7u));
+ EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u));
+ ASSERT_TRUE(IsBackEdge(9u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(9u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_WhileWhile_InvokeInFirstInnerLoopBody) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 1.
+ DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in first inner loop body.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER1(7u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 2.
+ DEF_OTHER0(8u, Instruction::GOTO), // Edge back to inner loop 2 head.
+ DEF_OTHER0(9u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_WhileWhile();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(8u, 7u));
+ EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u));
+ ASSERT_TRUE(IsBackEdge(9u, 4u));
+ EXPECT_TRUE(IsSuspendCheckEdge(9u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_WhileWhile_WithExtraEdge_InvokeInFirstInnerLoopBody) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 1.
+ DEF_INVOKE(6u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in first inner loop body.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_OTHER1(7u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 2.
+ DEF_OTHER0(8u, Instruction::GOTO), // Edge back to inner loop 2 head.
+ DEF_OTHER0(9u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_FALSE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(8u, 7u));
+ EXPECT_TRUE(IsSuspendCheckEdge(8u, 7u)); // Unaffected by the extra edge.
+ ASSERT_TRUE(IsBackEdge(9u, 4u));
+ EXPECT_TRUE(IsSuspendCheckEdge(9u, 4u));
+}
+
+TEST_F(SuspendCheckEliminationTest, While_WhileWhile_WithExtraEdge_InvokeInSecondInnerLoopHead) {
+ static const MIRDef mirs[] = {
+ DEF_OTHER1(4u, Instruction::IF_NEZ, 1u), // Edge out of outer loop.
+ DEF_OTHER1(5u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 1.
+ DEF_OTHER0(6u, Instruction::GOTO), // Edge back to inner loop head.
+ DEF_INVOKE(7u, Instruction::INVOKE_STATIC, 0u, 0u), // Invoke in second inner loop head.
+ DEF_OTHER1(7u, Instruction::IF_NEZ, 2u), // Edge out of inner loop 2.
+ DEF_OTHER0(8u, Instruction::GOTO), // Edge back to inner loop 2 head.
+ DEF_OTHER0(9u, Instruction::GOTO), // Edge back to outer loop head.
+ };
+
+ PrepareNestedLoopsWhile_WhileWhile_WithExtraEdge();
+ PrepareMIRs(mirs);
+ PerformSuspendCheckElimination();
+ ASSERT_TRUE(IsBackEdge(6u, 5u));
+ EXPECT_TRUE(IsSuspendCheckEdge(6u, 5u));
+ ASSERT_TRUE(IsBackEdge(8u, 7u));
+ EXPECT_FALSE(IsSuspendCheckEdge(8u, 7u)); // Unaffected by the extra edge.
+ ASSERT_TRUE(IsBackEdge(9u, 4u));
+ EXPECT_FALSE(IsSuspendCheckEdge(9u, 4u));
+}
} // namespace art
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index a2bf8b4aab..c476b2aabc 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -46,6 +46,7 @@ const Pass* const PassDriver<PassDriverMEOpts>::g_passes[] = {
GetPassInstance<TypeInference>(),
GetPassInstance<GlobalValueNumberingPass>(),
GetPassInstance<BBOptimizations>(),
+ GetPassInstance<SuspendCheckElimination>(),
};
// The number of the passes in the initial list of Passes (g_passes).
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
deleted file mode 100644
index ba255e0a76..0000000000
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ /dev/null
@@ -1,2003 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "object_utils.h"
-
-#include <llvm/ADT/DepthFirstIterator.h>
-#include <llvm/Analysis/Verifier.h>
-#include <llvm/Bitcode/ReaderWriter.h>
-#include <llvm/IR/Instruction.h>
-#include <llvm/IR/Instructions.h>
-#include <llvm/IR/Metadata.h>
-#include <llvm/IR/Type.h>
-#include <llvm/Support/Casting.h>
-#include <llvm/Support/InstIterator.h>
-#include <llvm/Support/ToolOutputFile.h>
-
-#include "dex/compiler_internals.h"
-#include "dex/dataflow_iterator-inl.h"
-#include "dex/frontend.h"
-#include "llvm/ir_builder.h"
-#include "llvm/llvm_compilation_unit.h"
-#include "llvm/utils_llvm.h"
-#include "mir_to_gbc.h"
-#include "thread-inl.h"
-
-const char* kLabelFormat = "%c0x%x_%d";
-const char kInvalidBlock = 0xff;
-const char kNormalBlock = 'L';
-const char kCatchBlock = 'C';
-
-namespace art {
-namespace llvm {
-::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
-}
-
-LLVMInfo::LLVMInfo() {
- // Create context, module, intrinsic helper & ir builder
- llvm_context_.reset(new ::llvm::LLVMContext());
- llvm_module_ = new ::llvm::Module("art", *llvm_context_);
- ::llvm::StructType::create(*llvm_context_, "JavaObject");
- art::llvm::makeLLVMModuleContents(llvm_module_);
- intrinsic_helper_.reset(new art::llvm::IntrinsicHelper(*llvm_context_, *llvm_module_));
- ir_builder_.reset(new art::llvm::IRBuilder(*llvm_context_, *llvm_module_, *intrinsic_helper_));
-}
-
-LLVMInfo::~LLVMInfo() {
-}
-
-::llvm::BasicBlock* MirConverter::GetLLVMBlock(int id) {
- return id_to_block_map_.Get(id);
-}
-
-::llvm::Value* MirConverter::GetLLVMValue(int s_reg) {
- return llvm_values_[s_reg];
-}
-
-void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg) {
- // Set vreg for debugging
- art::llvm::IntrinsicHelper::IntrinsicId id = art::llvm::IntrinsicHelper::SetVReg;
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
- int v_reg = mir_graph_->SRegToVReg(s_reg);
- ::llvm::Value* table_slot = irb_->getInt32(v_reg);
- ::llvm::Value* args[] = { table_slot, val };
- irb_->CreateCall(func, args);
-}
-
-// Replace the placeholder value with the real definition
-void MirConverter::DefineValueOnly(::llvm::Value* val, int s_reg) {
- ::llvm::Value* placeholder = GetLLVMValue(s_reg);
- if (placeholder == NULL) {
- // This can happen on instruction rewrite on verification failure
- LOG(WARNING) << "Null placeholder";
- return;
- }
- placeholder->replaceAllUsesWith(val);
- val->takeName(placeholder);
- llvm_values_[s_reg] = val;
- ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
- DCHECK(inst != NULL);
- inst->eraseFromParent();
-}
-
-void MirConverter::DefineValue(::llvm::Value* val, int s_reg) {
- DefineValueOnly(val, s_reg);
- SetVregOnValue(val, s_reg);
-}
-
-::llvm::Type* MirConverter::LlvmTypeFromLocRec(RegLocation loc) {
- ::llvm::Type* res = NULL;
- if (loc.wide) {
- if (loc.fp)
- res = irb_->getDoubleTy();
- else
- res = irb_->getInt64Ty();
- } else {
- if (loc.fp) {
- res = irb_->getFloatTy();
- } else {
- if (loc.ref)
- res = irb_->getJObjectTy();
- else
- res = irb_->getInt32Ty();
- }
- }
- return res;
-}
-
-void MirConverter::InitIR() {
- if (llvm_info_ == NULL) {
- CompilerTls* tls = cu_->compiler_driver->GetTls();
- CHECK(tls != NULL);
- llvm_info_ = static_cast<LLVMInfo*>(tls->GetLLVMInfo());
- if (llvm_info_ == NULL) {
- llvm_info_ = new LLVMInfo();
- tls->SetLLVMInfo(llvm_info_);
- }
- }
- context_ = llvm_info_->GetLLVMContext();
- module_ = llvm_info_->GetLLVMModule();
- intrinsic_helper_ = llvm_info_->GetIntrinsicHelper();
- irb_ = llvm_info_->GetIRBuilder();
-}
-
-::llvm::BasicBlock* MirConverter::FindCaseTarget(uint32_t vaddr) {
- BasicBlock* bb = mir_graph_->FindBlock(vaddr);
- DCHECK(bb != NULL);
- return GetLLVMBlock(bb->id);
-}
-
-void MirConverter::ConvertPackedSwitch(BasicBlock* bb, MIR* mir,
- int32_t table_offset, RegLocation rl_src) {
- const Instruction::PackedSwitchPayload* payload =
- reinterpret_cast<const Instruction::PackedSwitchPayload*>(
- mir_graph_->GetTable(mir, table_offset));
-
- ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
-
- ::llvm::SwitchInst* sw =
- irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
- payload->case_count);
-
- for (uint16_t i = 0; i < payload->case_count; ++i) {
- ::llvm::BasicBlock* llvm_bb =
- FindCaseTarget(current_dalvik_offset_ + payload->targets[i]);
- sw->addCase(irb_->getInt32(payload->first_key + i), llvm_bb);
- }
- ::llvm::MDNode* switch_node =
- ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
- sw->setMetadata("SwitchTable", switch_node);
- bb->taken = NullBasicBlockId;
- bb->fall_through = NullBasicBlockId;
-}
-
-void MirConverter::ConvertSparseSwitch(BasicBlock* bb, MIR* mir,
- int32_t table_offset, RegLocation rl_src) {
- const Instruction::SparseSwitchPayload* payload =
- reinterpret_cast<const Instruction::SparseSwitchPayload*>(
- mir_graph_->GetTable(mir, table_offset));
-
- const int32_t* keys = payload->GetKeys();
- const int32_t* targets = payload->GetTargets();
-
- ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
-
- ::llvm::SwitchInst* sw =
- irb_->CreateSwitch(value, GetLLVMBlock(bb->fall_through),
- payload->case_count);
-
- for (size_t i = 0; i < payload->case_count; ++i) {
- ::llvm::BasicBlock* llvm_bb =
- FindCaseTarget(current_dalvik_offset_ + targets[i]);
- sw->addCase(irb_->getInt32(keys[i]), llvm_bb);
- }
- ::llvm::MDNode* switch_node =
- ::llvm::MDNode::get(*context_, irb_->getInt32(table_offset));
- sw->setMetadata("SwitchTable", switch_node);
- bb->taken = NullBasicBlockId;
- bb->fall_through = NullBasicBlockId;
-}
-
-void MirConverter::ConvertSget(int32_t field_index,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) {
- ::llvm::Constant* field_idx = irb_->getInt32(field_index);
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res = irb_->CreateCall(intr, field_idx);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertSput(int32_t field_index,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src) {
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(field_index));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(intr, args);
-}
-
-void MirConverter::ConvertFillArrayData(int32_t offset, RegLocation rl_array) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::HLFillArrayData;
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(offset));
- args.push_back(GetLLVMValue(rl_array.orig_sreg));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(intr, args);
-}
-
-::llvm::Value* MirConverter::EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
- RegLocation loc) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- if (loc.wide) {
- if (loc.fp) {
- id = art::llvm::IntrinsicHelper::ConstDouble;
- } else {
- id = art::llvm::IntrinsicHelper::ConstLong;
- }
- } else {
- if (loc.fp) {
- id = art::llvm::IntrinsicHelper::ConstFloat;
- } else if (loc.ref) {
- id = art::llvm::IntrinsicHelper::ConstObj;
- } else {
- id = art::llvm::IntrinsicHelper::ConstInt;
- }
- }
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- return irb_->CreateCall(intr, src);
-}
-
-void MirConverter::EmitPopShadowFrame() {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::PopShadowFrame);
- irb_->CreateCall(intr);
-}
-
-::llvm::Value* MirConverter::EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
- RegLocation loc) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- if (loc.wide) {
- if (loc.fp) {
- id = art::llvm::IntrinsicHelper::CopyDouble;
- } else {
- id = art::llvm::IntrinsicHelper::CopyLong;
- }
- } else {
- if (loc.fp) {
- id = art::llvm::IntrinsicHelper::CopyFloat;
- } else if (loc.ref) {
- id = art::llvm::IntrinsicHelper::CopyObj;
- } else {
- id = art::llvm::IntrinsicHelper::CopyInt;
- }
- }
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- return irb_->CreateCall(intr, src);
-}
-
-void MirConverter::ConvertMoveException(RegLocation rl_dest) {
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::GetException);
- ::llvm::Value* res = irb_->CreateCall(func);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertThrow(RegLocation rl_src) {
- ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::HLThrowException);
- irb_->CreateCall(func, src);
-}
-
-void MirConverter::ConvertMonitorEnterExit(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_src) {
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(func, args);
-}
-
-void MirConverter::ConvertArrayLength(int opt_flags,
- RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::OptArrayLength);
- ::llvm::Value* res = irb_->CreateCall(func, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::EmitSuspendCheck() {
- art::llvm::IntrinsicHelper::IntrinsicId id =
- art::llvm::IntrinsicHelper::CheckSuspend;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(intr);
-}
-
-::llvm::Value* MirConverter::ConvertCompare(ConditionCode cc,
- ::llvm::Value* src1, ::llvm::Value* src2) {
- ::llvm::Value* res = NULL;
- DCHECK_EQ(src1->getType(), src2->getType());
- switch (cc) {
- case kCondEq: res = irb_->CreateICmpEQ(src1, src2); break;
- case kCondNe: res = irb_->CreateICmpNE(src1, src2); break;
- case kCondLt: res = irb_->CreateICmpSLT(src1, src2); break;
- case kCondGe: res = irb_->CreateICmpSGE(src1, src2); break;
- case kCondGt: res = irb_->CreateICmpSGT(src1, src2); break;
- case kCondLe: res = irb_->CreateICmpSLE(src1, src2); break;
- default: LOG(FATAL) << "Unexpected cc value " << cc;
- }
- return res;
-}
-
-void MirConverter::ConvertCompareAndBranch(BasicBlock* bb, MIR* mir,
- ConditionCode cc, RegLocation rl_src1, RegLocation rl_src2) {
- if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
- EmitSuspendCheck();
- }
- ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
- ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
- ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
- cond_value->setName(StringPrintf("t%d", temp_name_++));
- irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
- GetLLVMBlock(bb->fall_through));
- // Don't redo the fallthrough branch in the BB driver
- bb->fall_through = NullBasicBlockId;
-}
-
-void MirConverter::ConvertCompareZeroAndBranch(BasicBlock* bb,
- MIR* mir, ConditionCode cc, RegLocation rl_src1) {
- if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= mir->offset) {
- EmitSuspendCheck();
- }
- ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
- ::llvm::Value* src2;
- if (rl_src1.ref) {
- src2 = irb_->getJNull();
- } else {
- src2 = irb_->getInt32(0);
- }
- ::llvm::Value* cond_value = ConvertCompare(cc, src1, src2);
- irb_->CreateCondBr(cond_value, GetLLVMBlock(bb->taken),
- GetLLVMBlock(bb->fall_through));
- // Don't redo the fallthrough branch in the BB driver
- bb->fall_through = NullBasicBlockId;
-}
-
-::llvm::Value* MirConverter::GenDivModOp(bool is_div, bool is_long,
- ::llvm::Value* src1, ::llvm::Value* src2) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- if (is_long) {
- if (is_div) {
- id = art::llvm::IntrinsicHelper::DivLong;
- } else {
- id = art::llvm::IntrinsicHelper::RemLong;
- }
- } else {
- if (is_div) {
- id = art::llvm::IntrinsicHelper::DivInt;
- } else {
- id = art::llvm::IntrinsicHelper::RemInt;
- }
- }
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2>args;
- args.push_back(src1);
- args.push_back(src2);
- return irb_->CreateCall(intr, args);
-}
-
-::llvm::Value* MirConverter::GenArithOp(OpKind op, bool is_long,
- ::llvm::Value* src1, ::llvm::Value* src2) {
- ::llvm::Value* res = NULL;
- switch (op) {
- case kOpAdd: res = irb_->CreateAdd(src1, src2); break;
- case kOpSub: res = irb_->CreateSub(src1, src2); break;
- case kOpRsub: res = irb_->CreateSub(src2, src1); break;
- case kOpMul: res = irb_->CreateMul(src1, src2); break;
- case kOpOr: res = irb_->CreateOr(src1, src2); break;
- case kOpAnd: res = irb_->CreateAnd(src1, src2); break;
- case kOpXor: res = irb_->CreateXor(src1, src2); break;
- case kOpDiv: res = GenDivModOp(true, is_long, src1, src2); break;
- case kOpRem: res = GenDivModOp(false, is_long, src1, src2); break;
- case kOpLsl: res = irb_->CreateShl(src1, src2); break;
- case kOpLsr: res = irb_->CreateLShr(src1, src2); break;
- case kOpAsr: res = irb_->CreateAShr(src1, src2); break;
- default:
- LOG(FATAL) << "Invalid op " << op;
- }
- return res;
-}
-
-void MirConverter::ConvertFPArithOp(OpKind op, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
- ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
- ::llvm::Value* res = NULL;
- switch (op) {
- case kOpAdd: res = irb_->CreateFAdd(src1, src2); break;
- case kOpSub: res = irb_->CreateFSub(src1, src2); break;
- case kOpMul: res = irb_->CreateFMul(src1, src2); break;
- case kOpDiv: res = irb_->CreateFDiv(src1, src2); break;
- case kOpRem: res = irb_->CreateFRem(src1, src2); break;
- default:
- LOG(FATAL) << "Invalid op " << op;
- }
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2>args;
- args.push_back(GetLLVMValue(rl_src1.orig_sreg));
- args.push_back(GetLLVMValue(rl_src2.orig_sreg));
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src, int shift_amount) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2>args;
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- args.push_back(irb_->getInt32(shift_amount));
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertArithOp(OpKind op, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
- ::llvm::Value* src2 = GetLLVMValue(rl_src2.orig_sreg);
- DCHECK_EQ(src1->getType(), src2->getType());
- ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertArithOpLit(OpKind op, RegLocation rl_dest,
- RegLocation rl_src1, int32_t imm) {
- ::llvm::Value* src1 = GetLLVMValue(rl_src1.orig_sreg);
- ::llvm::Value* src2 = irb_->getInt32(imm);
- ::llvm::Value* res = GenArithOp(op, rl_dest.wide, src1, src2);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-/*
- * Process arguments for invoke. Note: this code is also used to
- * collect and process arguments for NEW_FILLED_ARRAY and NEW_FILLED_ARRAY_RANGE.
- * The requirements are similar.
- */
-void MirConverter::ConvertInvoke(BasicBlock* bb, MIR* mir,
- InvokeType invoke_type, bool is_range, bool is_filled_new_array) {
- CallInfo* info = mir_graph_->NewMemCallInfo(bb, mir, invoke_type, is_range);
- ::llvm::SmallVector< ::llvm::Value*, 10> args;
- // Insert the invoke_type
- args.push_back(irb_->getInt32(static_cast<int>(invoke_type)));
- // Insert the method_idx
- args.push_back(irb_->getInt32(info->index));
- // Insert the optimization flags
- args.push_back(irb_->getInt32(info->opt_flags));
- // Now, insert the actual arguments
- for (int i = 0; i < info->num_arg_words;) {
- ::llvm::Value* val = GetLLVMValue(info->args[i].orig_sreg);
- args.push_back(val);
- i += info->args[i].wide ? 2 : 1;
- }
- /*
- * Choose the invoke return type based on actual usage. Note: may
- * be different than shorty. For example, if a function return value
- * is not used, we'll treat this as a void invoke.
- */
- art::llvm::IntrinsicHelper::IntrinsicId id;
- if (is_filled_new_array) {
- id = art::llvm::IntrinsicHelper::HLFilledNewArray;
- } else if (info->result.location == kLocInvalid) {
- id = art::llvm::IntrinsicHelper::HLInvokeVoid;
- } else {
- if (info->result.wide) {
- if (info->result.fp) {
- id = art::llvm::IntrinsicHelper::HLInvokeDouble;
- } else {
- id = art::llvm::IntrinsicHelper::HLInvokeLong;
- }
- } else if (info->result.ref) {
- id = art::llvm::IntrinsicHelper::HLInvokeObj;
- } else if (info->result.fp) {
- id = art::llvm::IntrinsicHelper::HLInvokeFloat;
- } else {
- id = art::llvm::IntrinsicHelper::HLInvokeInt;
- }
- }
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- if (info->result.location != kLocInvalid) {
- DefineValue(res, info->result.orig_sreg);
- }
-}
-
-void MirConverter::ConvertConstObject(uint32_t idx,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* index = irb_->getInt32(idx);
- ::llvm::Value* res = irb_->CreateCall(intr, index);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertCheckCast(uint32_t type_idx, RegLocation rl_src) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::HLCheckCast;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(type_idx));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- irb_->CreateCall(intr, args);
-}
-
-void MirConverter::ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::NewInstance;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* index = irb_->getInt32(type_idx);
- ::llvm::Value* res = irb_->CreateCall(intr, index);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertNewArray(uint32_t type_idx,
- RegLocation rl_dest, RegLocation rl_src) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::NewArray;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(type_idx));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertAget(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index) {
- ::llvm::SmallVector< ::llvm::Value*, 3> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_array.orig_sreg));
- args.push_back(GetLLVMValue(rl_index.orig_sreg));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertAput(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_src, RegLocation rl_array, RegLocation rl_index) {
- ::llvm::SmallVector< ::llvm::Value*, 4> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- args.push_back(GetLLVMValue(rl_array.orig_sreg));
- args.push_back(GetLLVMValue(rl_index.orig_sreg));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(intr, args);
-}
-
-void MirConverter::ConvertIget(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_obj, int field_index) {
- ::llvm::SmallVector< ::llvm::Value*, 3> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_obj.orig_sreg));
- args.push_back(irb_->getInt32(field_index));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertIput(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_src, RegLocation rl_obj, int field_index) {
- ::llvm::SmallVector< ::llvm::Value*, 4> args;
- args.push_back(irb_->getInt32(opt_flags));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- args.push_back(GetLLVMValue(rl_obj.orig_sreg));
- args.push_back(irb_->getInt32(field_index));
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- irb_->CreateCall(intr, args);
-}
-
-void MirConverter::ConvertInstanceOf(uint32_t type_idx,
- RegLocation rl_dest, RegLocation rl_src) {
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::InstanceOf;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(irb_->getInt32(type_idx));
- args.push_back(GetLLVMValue(rl_src.orig_sreg));
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* res = irb_->CreateSExt(GetLLVMValue(rl_src.orig_sreg),
- irb_->getInt64Ty());
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
- ::llvm::Value* res = irb_->CreateTrunc(src, irb_->getInt32Ty());
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
- ::llvm::Value* res = irb_->CreateFPExt(src, irb_->getDoubleTy());
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
- ::llvm::Value* res = irb_->CreateFPTrunc(src, irb_->getFloatTy());
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- DCHECK_EQ(rl_src1.fp, rl_src2.fp);
- DCHECK_EQ(rl_src1.wide, rl_src2.wide);
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- args.push_back(GetLLVMValue(rl_src1.orig_sreg));
- args.push_back(GetLLVMValue(rl_src2.orig_sreg));
- ::llvm::Value* res = irb_->CreateCall(intr, args);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
- art::llvm::IntrinsicHelper::IntrinsicId id) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res =
- irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertNeg(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* res = irb_->CreateNeg(GetLLVMValue(rl_src.orig_sreg));
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest,
- RegLocation rl_src) {
- ::llvm::Value* res =
- irb_->CreateSIToFP(GetLLVMValue(rl_src.orig_sreg), ty);
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest,
- RegLocation rl_src) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* res = irb_->CreateCall(intr, GetLLVMValue(rl_src.orig_sreg));
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-
-void MirConverter::ConvertNegFP(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* res =
- irb_->CreateFNeg(GetLLVMValue(rl_src.orig_sreg));
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::ConvertNot(RegLocation rl_dest, RegLocation rl_src) {
- ::llvm::Value* src = GetLLVMValue(rl_src.orig_sreg);
- ::llvm::Value* res = irb_->CreateXor(src, static_cast<uint64_t>(-1));
- DefineValue(res, rl_dest.orig_sreg);
-}
-
-void MirConverter::EmitConstructorBarrier() {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::ConstructorBarrier);
- irb_->CreateCall(intr);
-}
-
-/*
- * Target-independent code generation. Use only high-level
- * load/store utilities here, or target-dependent genXX() handlers
- * when necessary.
- */
-bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
- ::llvm::BasicBlock* llvm_bb) {
- bool res = false; // Assume success
- RegLocation rl_src[3];
- RegLocation rl_dest = mir_graph_->GetBadLoc();
- Instruction::Code opcode = mir->dalvikInsn.opcode;
- int op_val = opcode;
- uint32_t vB = mir->dalvikInsn.vB;
- uint32_t vC = mir->dalvikInsn.vC;
- int opt_flags = mir->optimization_flags;
-
- if (cu_->verbose) {
- if (!IsPseudoMirOp(op_val)) {
- LOG(INFO) << ".. " << Instruction::Name(opcode) << " 0x" << std::hex << op_val;
- } else {
- LOG(INFO) << mir_graph_->extended_mir_op_names_[op_val - kMirOpFirst] << " 0x" << std::hex << op_val;
- }
- }
-
- /* Prep Src and Dest locations */
- int next_sreg = 0;
- int next_loc = 0;
- uint64_t attrs = MirGraph::GetDataFlowAttributes(opcode);
- rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
- if (attrs & DF_UA) {
- if (attrs & DF_A_WIDE) {
- rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
- next_sreg+= 2;
- } else {
- rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
- next_sreg++;
- }
- }
- if (attrs & DF_UB) {
- if (attrs & DF_B_WIDE) {
- rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
- next_sreg+= 2;
- } else {
- rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
- next_sreg++;
- }
- }
- if (attrs & DF_UC) {
- if (attrs & DF_C_WIDE) {
- rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
- } else {
- rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
- }
- }
- if (attrs & DF_DA) {
- if (attrs & DF_A_WIDE) {
- rl_dest = mir_graph_->GetDestWide(mir);
- } else {
- rl_dest = mir_graph_->GetDest(mir);
- }
- }
-
- switch (opcode) {
- case Instruction::NOP:
- break;
-
- case Instruction::MOVE:
- case Instruction::MOVE_OBJECT:
- case Instruction::MOVE_16:
- case Instruction::MOVE_OBJECT_16:
- case Instruction::MOVE_OBJECT_FROM16:
- case Instruction::MOVE_FROM16:
- case Instruction::MOVE_WIDE:
- case Instruction::MOVE_WIDE_16:
- case Instruction::MOVE_WIDE_FROM16: {
- /*
- * Moves/copies are meaningless in pure SSA register form,
- * but we need to preserve them for the conversion back into
- * MIR (at least until we stop using the Dalvik register maps).
- * Insert a dummy intrinsic copy call, which will be recognized
- * by the quick path and removed by the portable path.
- */
- ::llvm::Value* src = GetLLVMValue(rl_src[0].orig_sreg);
- ::llvm::Value* res = EmitCopy(src, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
-
- case Instruction::CONST:
- case Instruction::CONST_4:
- case Instruction::CONST_16: {
- ::llvm::Constant* imm_value = irb_->getJInt(vB);
- ::llvm::Value* res = EmitConst(imm_value, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
-
- case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32: {
- // Sign extend to 64 bits
- int64_t imm = static_cast<int32_t>(vB);
- ::llvm::Constant* imm_value = irb_->getJLong(imm);
- ::llvm::Value* res = EmitConst(imm_value, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
-
- case Instruction::CONST_HIGH16: {
- ::llvm::Constant* imm_value = irb_->getJInt(vB << 16);
- ::llvm::Value* res = EmitConst(imm_value, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
-
- case Instruction::CONST_WIDE: {
- ::llvm::Constant* imm_value =
- irb_->getJLong(mir->dalvikInsn.vB_wide);
- ::llvm::Value* res = EmitConst(imm_value, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
- case Instruction::CONST_WIDE_HIGH16: {
- int64_t imm = static_cast<int64_t>(vB) << 48;
- ::llvm::Constant* imm_value = irb_->getJLong(imm);
- ::llvm::Value* res = EmitConst(imm_value, rl_dest);
- DefineValue(res, rl_dest.orig_sreg);
- }
- break;
-
- case Instruction::SPUT_OBJECT:
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputObject,
- rl_src[0]);
- break;
- case Instruction::SPUT:
- if (rl_src[0].fp) {
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputFloat,
- rl_src[0]);
- } else {
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSput, rl_src[0]);
- }
- break;
- case Instruction::SPUT_BOOLEAN:
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputBoolean,
- rl_src[0]);
- break;
- case Instruction::SPUT_BYTE:
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputByte, rl_src[0]);
- break;
- case Instruction::SPUT_CHAR:
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputChar, rl_src[0]);
- break;
- case Instruction::SPUT_SHORT:
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputShort, rl_src[0]);
- break;
- case Instruction::SPUT_WIDE:
- if (rl_src[0].fp) {
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputDouble,
- rl_src[0]);
- } else {
- ConvertSput(vB, art::llvm::IntrinsicHelper::HLSputWide,
- rl_src[0]);
- }
- break;
-
- case Instruction::SGET_OBJECT:
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetObject, rl_dest);
- break;
- case Instruction::SGET:
- if (rl_dest.fp) {
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetFloat, rl_dest);
- } else {
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSget, rl_dest);
- }
- break;
- case Instruction::SGET_BOOLEAN:
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetBoolean, rl_dest);
- break;
- case Instruction::SGET_BYTE:
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetByte, rl_dest);
- break;
- case Instruction::SGET_CHAR:
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetChar, rl_dest);
- break;
- case Instruction::SGET_SHORT:
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetShort, rl_dest);
- break;
- case Instruction::SGET_WIDE:
- if (rl_dest.fp) {
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetDouble,
- rl_dest);
- } else {
- ConvertSget(vB, art::llvm::IntrinsicHelper::HLSgetWide, rl_dest);
- }
- break;
-
- case Instruction::RETURN_WIDE:
- case Instruction::RETURN:
- case Instruction::RETURN_OBJECT: {
- if (!mir_graph_->MethodIsLeaf()) {
- EmitSuspendCheck();
- }
- EmitPopShadowFrame();
- irb_->CreateRet(GetLLVMValue(rl_src[0].orig_sreg));
- DCHECK(bb->terminated_by_return);
- }
- break;
-
- case Instruction::RETURN_VOID: {
- if (((cu_->access_flags & kAccConstructor) != 0) &&
- cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(),
- cu_->dex_file,
- cu_->class_def_idx)) {
- EmitConstructorBarrier();
- }
- if (!mir_graph_->MethodIsLeaf()) {
- EmitSuspendCheck();
- }
- EmitPopShadowFrame();
- irb_->CreateRetVoid();
- DCHECK(bb->terminated_by_return);
- }
- break;
-
- case Instruction::IF_EQ:
- ConvertCompareAndBranch(bb, mir, kCondEq, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_NE:
- ConvertCompareAndBranch(bb, mir, kCondNe, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_LT:
- ConvertCompareAndBranch(bb, mir, kCondLt, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_GE:
- ConvertCompareAndBranch(bb, mir, kCondGe, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_GT:
- ConvertCompareAndBranch(bb, mir, kCondGt, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_LE:
- ConvertCompareAndBranch(bb, mir, kCondLe, rl_src[0], rl_src[1]);
- break;
- case Instruction::IF_EQZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondEq, rl_src[0]);
- break;
- case Instruction::IF_NEZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondNe, rl_src[0]);
- break;
- case Instruction::IF_LTZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondLt, rl_src[0]);
- break;
- case Instruction::IF_GEZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondGe, rl_src[0]);
- break;
- case Instruction::IF_GTZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondGt, rl_src[0]);
- break;
- case Instruction::IF_LEZ:
- ConvertCompareZeroAndBranch(bb, mir, kCondLe, rl_src[0]);
- break;
-
- case Instruction::GOTO:
- case Instruction::GOTO_16:
- case Instruction::GOTO_32: {
- if (mir_graph_->GetBasicBlock(bb->taken)->start_offset <= bb->start_offset) {
- EmitSuspendCheck();
- }
- irb_->CreateBr(GetLLVMBlock(bb->taken));
- }
- break;
-
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- case Instruction::ADD_INT:
- case Instruction::ADD_INT_2ADDR:
- ConvertArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
- case Instruction::SUB_INT:
- case Instruction::SUB_INT_2ADDR:
- ConvertArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::MUL_LONG:
- case Instruction::MUL_LONG_2ADDR:
- case Instruction::MUL_INT:
- case Instruction::MUL_INT_2ADDR:
- ConvertArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::DIV_LONG:
- case Instruction::DIV_LONG_2ADDR:
- case Instruction::DIV_INT:
- case Instruction::DIV_INT_2ADDR:
- ConvertArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::REM_LONG:
- case Instruction::REM_LONG_2ADDR:
- case Instruction::REM_INT:
- case Instruction::REM_INT_2ADDR:
- ConvertArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AND_LONG:
- case Instruction::AND_LONG_2ADDR:
- case Instruction::AND_INT:
- case Instruction::AND_INT_2ADDR:
- ConvertArithOp(kOpAnd, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::OR_LONG:
- case Instruction::OR_LONG_2ADDR:
- case Instruction::OR_INT:
- case Instruction::OR_INT_2ADDR:
- ConvertArithOp(kOpOr, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::XOR_LONG:
- case Instruction::XOR_LONG_2ADDR:
- case Instruction::XOR_INT:
- case Instruction::XOR_INT_2ADDR:
- ConvertArithOp(kOpXor, rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::SHLLong,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::SHL_INT:
- case Instruction::SHL_INT_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::SHLInt,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::SHRLong,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::SHR_INT:
- case Instruction::SHR_INT_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::SHRInt,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::USHRLong,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::USHR_INT:
- case Instruction::USHR_INT_2ADDR:
- ConvertShift(art::llvm::IntrinsicHelper::USHRInt,
- rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::ADD_INT_LIT16:
- case Instruction::ADD_INT_LIT8:
- ConvertArithOpLit(kOpAdd, rl_dest, rl_src[0], vC);
- break;
- case Instruction::RSUB_INT:
- case Instruction::RSUB_INT_LIT8:
- ConvertArithOpLit(kOpRsub, rl_dest, rl_src[0], vC);
- break;
- case Instruction::MUL_INT_LIT16:
- case Instruction::MUL_INT_LIT8:
- ConvertArithOpLit(kOpMul, rl_dest, rl_src[0], vC);
- break;
- case Instruction::DIV_INT_LIT16:
- case Instruction::DIV_INT_LIT8:
- ConvertArithOpLit(kOpDiv, rl_dest, rl_src[0], vC);
- break;
- case Instruction::REM_INT_LIT16:
- case Instruction::REM_INT_LIT8:
- ConvertArithOpLit(kOpRem, rl_dest, rl_src[0], vC);
- break;
- case Instruction::AND_INT_LIT16:
- case Instruction::AND_INT_LIT8:
- ConvertArithOpLit(kOpAnd, rl_dest, rl_src[0], vC);
- break;
- case Instruction::OR_INT_LIT16:
- case Instruction::OR_INT_LIT8:
- ConvertArithOpLit(kOpOr, rl_dest, rl_src[0], vC);
- break;
- case Instruction::XOR_INT_LIT16:
- case Instruction::XOR_INT_LIT8:
- ConvertArithOpLit(kOpXor, rl_dest, rl_src[0], vC);
- break;
- case Instruction::SHL_INT_LIT8:
- ConvertShiftLit(art::llvm::IntrinsicHelper::SHLInt,
- rl_dest, rl_src[0], vC & 0x1f);
- break;
- case Instruction::SHR_INT_LIT8:
- ConvertShiftLit(art::llvm::IntrinsicHelper::SHRInt,
- rl_dest, rl_src[0], vC & 0x1f);
- break;
- case Instruction::USHR_INT_LIT8:
- ConvertShiftLit(art::llvm::IntrinsicHelper::USHRInt,
- rl_dest, rl_src[0], vC & 0x1f);
- break;
-
- case Instruction::ADD_FLOAT:
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_DOUBLE:
- case Instruction::ADD_DOUBLE_2ADDR:
- ConvertFPArithOp(kOpAdd, rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::SUB_FLOAT:
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_DOUBLE:
- case Instruction::SUB_DOUBLE_2ADDR:
- ConvertFPArithOp(kOpSub, rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::MUL_FLOAT:
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_DOUBLE:
- case Instruction::MUL_DOUBLE_2ADDR:
- ConvertFPArithOp(kOpMul, rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::DIV_FLOAT:
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_DOUBLE:
- case Instruction::DIV_DOUBLE_2ADDR:
- ConvertFPArithOp(kOpDiv, rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::REM_FLOAT:
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_DOUBLE:
- case Instruction::REM_DOUBLE_2ADDR:
- ConvertFPArithOp(kOpRem, rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::INVOKE_STATIC:
- ConvertInvoke(bb, mir, kStatic, false /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::INVOKE_STATIC_RANGE:
- ConvertInvoke(bb, mir, kStatic, true /*range*/,
- false /* NewFilledArray */);
- break;
-
- case Instruction::INVOKE_DIRECT:
- ConvertInvoke(bb, mir, kDirect, false /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::INVOKE_DIRECT_RANGE:
- ConvertInvoke(bb, mir, kDirect, true /*range*/,
- false /* NewFilledArray */);
- break;
-
- case Instruction::INVOKE_VIRTUAL:
- ConvertInvoke(bb, mir, kVirtual, false /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
- ConvertInvoke(bb, mir, kVirtual, true /*range*/,
- false /* NewFilledArray */);
- break;
-
- case Instruction::INVOKE_SUPER:
- ConvertInvoke(bb, mir, kSuper, false /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::INVOKE_SUPER_RANGE:
- ConvertInvoke(bb, mir, kSuper, true /*range*/,
- false /* NewFilledArray */);
- break;
-
- case Instruction::INVOKE_INTERFACE:
- ConvertInvoke(bb, mir, kInterface, false /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::INVOKE_INTERFACE_RANGE:
- ConvertInvoke(bb, mir, kInterface, true /*range*/,
- false /* NewFilledArray */);
- break;
- case Instruction::FILLED_NEW_ARRAY:
- ConvertInvoke(bb, mir, kInterface, false /*range*/,
- true /* NewFilledArray */);
- break;
- case Instruction::FILLED_NEW_ARRAY_RANGE:
- ConvertInvoke(bb, mir, kInterface, true /*range*/,
- true /* NewFilledArray */);
- break;
-
- case Instruction::CONST_STRING:
- case Instruction::CONST_STRING_JUMBO:
- ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstString,
- rl_dest);
- break;
-
- case Instruction::CONST_CLASS:
- ConvertConstObject(vB, art::llvm::IntrinsicHelper::ConstClass,
- rl_dest);
- break;
-
- case Instruction::CHECK_CAST:
- ConvertCheckCast(vB, rl_src[0]);
- break;
-
- case Instruction::NEW_INSTANCE:
- ConvertNewInstance(vB, rl_dest);
- break;
-
- case Instruction::MOVE_EXCEPTION:
- ConvertMoveException(rl_dest);
- break;
-
- case Instruction::THROW:
- ConvertThrow(rl_src[0]);
- /*
- * If this throw is standalone, terminate.
- * If it might rethrow, force termination
- * of the following block.
- */
- if (bb->fall_through == NullBasicBlockId) {
- irb_->CreateUnreachable();
- } else {
- mir_graph_->GetBasicBlock(bb->fall_through)->fall_through = NullBasicBlockId;
- mir_graph_->GetBasicBlock(bb->fall_through)->taken = NullBasicBlockId;
- }
- break;
-
- case Instruction::MOVE_RESULT_WIDE:
- case Instruction::MOVE_RESULT:
- case Instruction::MOVE_RESULT_OBJECT:
- /*
- * All move_results should have been folded into the preceeding invoke.
- */
- LOG(FATAL) << "Unexpected move_result";
- break;
-
- case Instruction::MONITOR_ENTER:
- ConvertMonitorEnterExit(opt_flags,
- art::llvm::IntrinsicHelper::MonitorEnter,
- rl_src[0]);
- break;
-
- case Instruction::MONITOR_EXIT:
- ConvertMonitorEnterExit(opt_flags,
- art::llvm::IntrinsicHelper::MonitorExit,
- rl_src[0]);
- break;
-
- case Instruction::ARRAY_LENGTH:
- ConvertArrayLength(opt_flags, rl_dest, rl_src[0]);
- break;
-
- case Instruction::NEW_ARRAY:
- ConvertNewArray(vC, rl_dest, rl_src[0]);
- break;
-
- case Instruction::INSTANCE_OF:
- ConvertInstanceOf(vC, rl_dest, rl_src[0]);
- break;
-
- case Instruction::AGET:
- if (rl_dest.fp) {
- ConvertAget(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayGetFloat,
- rl_dest, rl_src[0], rl_src[1]);
- } else {
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGet,
- rl_dest, rl_src[0], rl_src[1]);
- }
- break;
- case Instruction::AGET_OBJECT:
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetObject,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AGET_BOOLEAN:
- ConvertAget(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayGetBoolean,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AGET_BYTE:
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetByte,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AGET_CHAR:
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetChar,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AGET_SHORT:
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetShort,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::AGET_WIDE:
- if (rl_dest.fp) {
- ConvertAget(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayGetDouble,
- rl_dest, rl_src[0], rl_src[1]);
- } else {
- ConvertAget(opt_flags, art::llvm::IntrinsicHelper::HLArrayGetWide,
- rl_dest, rl_src[0], rl_src[1]);
- }
- break;
-
- case Instruction::APUT:
- if (rl_src[0].fp) {
- ConvertAput(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayPutFloat,
- rl_src[0], rl_src[1], rl_src[2]);
- } else {
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPut,
- rl_src[0], rl_src[1], rl_src[2]);
- }
- break;
- case Instruction::APUT_OBJECT:
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutObject,
- rl_src[0], rl_src[1], rl_src[2]);
- break;
- case Instruction::APUT_BOOLEAN:
- ConvertAput(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayPutBoolean,
- rl_src[0], rl_src[1], rl_src[2]);
- break;
- case Instruction::APUT_BYTE:
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutByte,
- rl_src[0], rl_src[1], rl_src[2]);
- break;
- case Instruction::APUT_CHAR:
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutChar,
- rl_src[0], rl_src[1], rl_src[2]);
- break;
- case Instruction::APUT_SHORT:
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutShort,
- rl_src[0], rl_src[1], rl_src[2]);
- break;
- case Instruction::APUT_WIDE:
- if (rl_src[0].fp) {
- ConvertAput(opt_flags,
- art::llvm::IntrinsicHelper::HLArrayPutDouble,
- rl_src[0], rl_src[1], rl_src[2]);
- } else {
- ConvertAput(opt_flags, art::llvm::IntrinsicHelper::HLArrayPutWide,
- rl_src[0], rl_src[1], rl_src[2]);
- }
- break;
-
- case Instruction::IGET:
- if (rl_dest.fp) {
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetFloat,
- rl_dest, rl_src[0], vC);
- } else {
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGet,
- rl_dest, rl_src[0], vC);
- }
- break;
- case Instruction::IGET_OBJECT:
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetObject,
- rl_dest, rl_src[0], vC);
- break;
- case Instruction::IGET_BOOLEAN:
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetBoolean,
- rl_dest, rl_src[0], vC);
- break;
- case Instruction::IGET_BYTE:
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetByte,
- rl_dest, rl_src[0], vC);
- break;
- case Instruction::IGET_CHAR:
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetChar,
- rl_dest, rl_src[0], vC);
- break;
- case Instruction::IGET_SHORT:
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetShort,
- rl_dest, rl_src[0], vC);
- break;
- case Instruction::IGET_WIDE:
- if (rl_dest.fp) {
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetDouble,
- rl_dest, rl_src[0], vC);
- } else {
- ConvertIget(opt_flags, art::llvm::IntrinsicHelper::HLIGetWide,
- rl_dest, rl_src[0], vC);
- }
- break;
- case Instruction::IPUT:
- if (rl_src[0].fp) {
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutFloat,
- rl_src[0], rl_src[1], vC);
- } else {
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPut,
- rl_src[0], rl_src[1], vC);
- }
- break;
- case Instruction::IPUT_OBJECT:
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutObject,
- rl_src[0], rl_src[1], vC);
- break;
- case Instruction::IPUT_BOOLEAN:
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutBoolean,
- rl_src[0], rl_src[1], vC);
- break;
- case Instruction::IPUT_BYTE:
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutByte,
- rl_src[0], rl_src[1], vC);
- break;
- case Instruction::IPUT_CHAR:
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutChar,
- rl_src[0], rl_src[1], vC);
- break;
- case Instruction::IPUT_SHORT:
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutShort,
- rl_src[0], rl_src[1], vC);
- break;
- case Instruction::IPUT_WIDE:
- if (rl_src[0].fp) {
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutDouble,
- rl_src[0], rl_src[1], vC);
- } else {
- ConvertIput(opt_flags, art::llvm::IntrinsicHelper::HLIPutWide,
- rl_src[0], rl_src[1], vC);
- }
- break;
-
- case Instruction::FILL_ARRAY_DATA:
- ConvertFillArrayData(vB, rl_src[0]);
- break;
-
- case Instruction::LONG_TO_INT:
- ConvertLongToInt(rl_dest, rl_src[0]);
- break;
-
- case Instruction::INT_TO_LONG:
- ConvertIntToLong(rl_dest, rl_src[0]);
- break;
-
- case Instruction::INT_TO_CHAR:
- ConvertIntNarrowing(rl_dest, rl_src[0],
- art::llvm::IntrinsicHelper::IntToChar);
- break;
- case Instruction::INT_TO_BYTE:
- ConvertIntNarrowing(rl_dest, rl_src[0],
- art::llvm::IntrinsicHelper::IntToByte);
- break;
- case Instruction::INT_TO_SHORT:
- ConvertIntNarrowing(rl_dest, rl_src[0],
- art::llvm::IntrinsicHelper::IntToShort);
- break;
-
- case Instruction::INT_TO_FLOAT:
- case Instruction::LONG_TO_FLOAT:
- ConvertIntToFP(irb_->getFloatTy(), rl_dest, rl_src[0]);
- break;
-
- case Instruction::INT_TO_DOUBLE:
- case Instruction::LONG_TO_DOUBLE:
- ConvertIntToFP(irb_->getDoubleTy(), rl_dest, rl_src[0]);
- break;
-
- case Instruction::FLOAT_TO_DOUBLE:
- ConvertFloatToDouble(rl_dest, rl_src[0]);
- break;
-
- case Instruction::DOUBLE_TO_FLOAT:
- ConvertDoubleToFloat(rl_dest, rl_src[0]);
- break;
-
- case Instruction::NEG_LONG:
- case Instruction::NEG_INT:
- ConvertNeg(rl_dest, rl_src[0]);
- break;
-
- case Instruction::NEG_FLOAT:
- case Instruction::NEG_DOUBLE:
- ConvertNegFP(rl_dest, rl_src[0]);
- break;
-
- case Instruction::NOT_LONG:
- case Instruction::NOT_INT:
- ConvertNot(rl_dest, rl_src[0]);
- break;
-
- case Instruction::FLOAT_TO_INT:
- ConvertFPToInt(art::llvm::IntrinsicHelper::F2I, rl_dest, rl_src[0]);
- break;
-
- case Instruction::DOUBLE_TO_INT:
- ConvertFPToInt(art::llvm::IntrinsicHelper::D2I, rl_dest, rl_src[0]);
- break;
-
- case Instruction::FLOAT_TO_LONG:
- ConvertFPToInt(art::llvm::IntrinsicHelper::F2L, rl_dest, rl_src[0]);
- break;
-
- case Instruction::DOUBLE_TO_LONG:
- ConvertFPToInt(art::llvm::IntrinsicHelper::D2L, rl_dest, rl_src[0]);
- break;
-
- case Instruction::CMPL_FLOAT:
- ConvertWideComparison(art::llvm::IntrinsicHelper::CmplFloat,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::CMPG_FLOAT:
- ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgFloat,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::CMPL_DOUBLE:
- ConvertWideComparison(art::llvm::IntrinsicHelper::CmplDouble,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::CMPG_DOUBLE:
- ConvertWideComparison(art::llvm::IntrinsicHelper::CmpgDouble,
- rl_dest, rl_src[0], rl_src[1]);
- break;
- case Instruction::CMP_LONG:
- ConvertWideComparison(art::llvm::IntrinsicHelper::CmpLong,
- rl_dest, rl_src[0], rl_src[1]);
- break;
-
- case Instruction::PACKED_SWITCH:
- ConvertPackedSwitch(bb, vB, rl_src[0]);
- break;
-
- case Instruction::SPARSE_SWITCH:
- ConvertSparseSwitch(bb, vB, rl_src[0]);
- break;
-
- default:
- UNIMPLEMENTED(FATAL) << "Unsupported Dex opcode 0x" << std::hex << opcode;
- res = true;
- }
- return res;
-} // NOLINT(readability/fn_size)
-
-void MirConverter::SetDexOffset(int32_t offset) {
- current_dalvik_offset_ = offset;
- ::llvm::SmallVector< ::llvm::Value*, 1> array_ref;
- array_ref.push_back(irb_->getInt32(offset));
- ::llvm::MDNode* node = ::llvm::MDNode::get(*context_, array_ref);
- irb_->SetDexOffset(node);
-}
-
-// Attach method info as metadata to special intrinsic
-void MirConverter::SetMethodInfo() {
- // We don't want dex offset on this
- irb_->SetDexOffset(NULL);
- art::llvm::IntrinsicHelper::IntrinsicId id;
- id = art::llvm::IntrinsicHelper::MethodInfo;
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Instruction* inst = irb_->CreateCall(intr);
- ::llvm::SmallVector< ::llvm::Value*, 2> reg_info;
- reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfInVRs()));
- reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfLocalCodeVRs()));
- reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfOutVRs()));
- reg_info.push_back(irb_->getInt32(mir_graph_->GetNumUsedCompilerTemps()));
- reg_info.push_back(irb_->getInt32(mir_graph_->GetNumSSARegs()));
- ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*context_, reg_info);
- inst->setMetadata("RegInfo", reg_info_node);
- SetDexOffset(current_dalvik_offset_);
-}
-
-void MirConverter::HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb) {
- SetDexOffset(bb->start_offset);
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- int opcode = mir->dalvikInsn.opcode;
- if (!IsPseudoMirOp(opcode)) {
- // Stop after first non-pseudo MIR op.
- continue;
- }
- if (opcode != kMirOpPhi) {
- // Skip other mir Pseudos.
- continue;
- }
- RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
- /*
- * The Art compiler's Phi nodes only handle 32-bit operands,
- * representing wide values using a matched set of Phi nodes
- * for the lower and upper halves. In the llvm world, we only
- * want a single Phi for wides. Here we will simply discard
- * the Phi node representing the high word.
- */
- if (rl_dest.high_word) {
- continue; // No Phi node - handled via low word
- }
- BasicBlockId* incoming = mir->meta.phi_incoming;
- ::llvm::Type* phi_type =
- LlvmTypeFromLocRec(rl_dest);
- ::llvm::PHINode* phi = irb_->CreatePHI(phi_type, mir->ssa_rep->num_uses);
- for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
- RegLocation loc;
- // Don't check width here.
- loc = mir_graph_->GetRawSrc(mir, i);
- DCHECK_EQ(rl_dest.wide, loc.wide);
- DCHECK_EQ(rl_dest.wide & rl_dest.high_word, loc.wide & loc.high_word);
- DCHECK_EQ(rl_dest.fp, loc.fp);
- DCHECK_EQ(rl_dest.core, loc.core);
- DCHECK_EQ(rl_dest.ref, loc.ref);
- SafeMap<unsigned int, unsigned int>::iterator it;
- it = mir_graph_->block_id_map_.find(incoming[i]);
- DCHECK(it != mir_graph_->block_id_map_.end());
- DCHECK(GetLLVMValue(loc.orig_sreg) != NULL);
- DCHECK(GetLLVMBlock(it->second) != NULL);
- phi->addIncoming(GetLLVMValue(loc.orig_sreg),
- GetLLVMBlock(it->second));
- }
- DefineValueOnly(phi, rl_dest.orig_sreg);
- }
-}
-
-/* Extended MIR instructions like PHI */
-void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
- ::llvm::BasicBlock* llvm_bb) {
- switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
- case kMirOpPhi: {
- // The llvm Phi node already emitted - just DefineValue() here.
- RegLocation rl_dest = mir_graph_->reg_location_[mir->ssa_rep->defs[0]];
- if (!rl_dest.high_word) {
- // Only consider low word of pairs.
- DCHECK(GetLLVMValue(rl_dest.orig_sreg) != NULL);
- ::llvm::Value* phi = GetLLVMValue(rl_dest.orig_sreg);
- if (1) SetVregOnValue(phi, rl_dest.orig_sreg);
- }
- break;
- }
- case kMirOpCopy: {
- UNIMPLEMENTED(WARNING) << "unimp kMirOpPhi";
- break;
- }
- case kMirOpNop:
- if ((mir == bb->last_mir_insn) && (bb->taken == NullBasicBlockId) &&
- (bb->fall_through == NullBasicBlockId)) {
- irb_->CreateUnreachable();
- }
- break;
-
- // TODO: need GBC intrinsic to take advantage of fused operations
- case kMirOpFusedCmplFloat:
- UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpFloat unsupported";
- break;
- case kMirOpFusedCmpgFloat:
- UNIMPLEMENTED(FATAL) << "kMirOpFusedCmgFloat unsupported";
- break;
- case kMirOpFusedCmplDouble:
- UNIMPLEMENTED(FATAL) << "kMirOpFusedCmplDouble unsupported";
- break;
- case kMirOpFusedCmpgDouble:
- UNIMPLEMENTED(FATAL) << "kMirOpFusedCmpgDouble unsupported";
- break;
- case kMirOpFusedCmpLong:
- UNIMPLEMENTED(FATAL) << "kMirOpLongCmpBranch unsupported";
- break;
- default:
- break;
- }
-}
-
-/* Handle the content in each basic block */
-bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
- if (bb->block_type == kDead) return false;
- ::llvm::BasicBlock* llvm_bb = GetLLVMBlock(bb->id);
- if (llvm_bb == NULL) {
- CHECK(bb->block_type == kExitBlock);
- } else {
- irb_->SetInsertPoint(llvm_bb);
- SetDexOffset(bb->start_offset);
- }
-
- if (cu_->verbose) {
- LOG(INFO) << "................................";
- LOG(INFO) << "Block id " << bb->id;
- if (llvm_bb != NULL) {
- LOG(INFO) << "label " << llvm_bb->getName().str().c_str();
- } else {
- LOG(INFO) << "llvm_bb is NULL";
- }
- }
-
- if (bb->block_type == kEntryBlock) {
- SetMethodInfo();
-
- { // Allocate shadowframe.
- art::llvm::IntrinsicHelper::IntrinsicId id =
- art::llvm::IntrinsicHelper::AllocaShadowFrame;
- ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
- ::llvm::Value* entries = irb_->getInt32(mir_graph_->GetNumOfCodeVRs());
- irb_->CreateCall(func, entries);
- }
-
- { // Store arguments to vregs.
- uint16_t arg_reg = mir_graph_->GetFirstInVR();
-
- ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
-
- const char* shorty = cu_->shorty;
- uint32_t shorty_size = strlen(shorty);
- CHECK_GE(shorty_size, 1u);
-
- ++arg_iter; // skip method object
-
- if ((cu_->access_flags & kAccStatic) == 0) {
- SetVregOnValue(arg_iter, arg_reg);
- ++arg_iter;
- ++arg_reg;
- }
-
- for (uint32_t i = 1; i < shorty_size; ++i, ++arg_iter) {
- SetVregOnValue(arg_iter, arg_reg);
-
- ++arg_reg;
- if (shorty[i] == 'J' || shorty[i] == 'D') {
- // Wide types, such as long and double, are using a pair of registers
- // to store the value, so we have to increase arg_reg again.
- ++arg_reg;
- }
- }
- }
- } else if (bb->block_type == kExitBlock) {
- /*
- * Because of the differences between how MIR/LIR and llvm handle exit
- * blocks, we won't explicitly covert them. On the llvm-to-lir
- * path, it will need to be regenereated.
- */
- return false;
- } else if (bb->block_type == kExceptionHandling) {
- /*
- * Because we're deferring null checking, delete the associated empty
- * exception block.
- */
- llvm_bb->eraseFromParent();
- return false;
- }
-
- HandlePhiNodes(bb, llvm_bb);
-
- for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
- SetDexOffset(mir->offset);
-
- int opcode = mir->dalvikInsn.opcode;
- Instruction::Format dalvik_format =
- Instruction::FormatOf(mir->dalvikInsn.opcode);
-
- if (opcode == kMirOpCheck) {
- // Combine check and work halves of throwing instruction.
- MIR* work_half = mir->meta.throw_insn;
- mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
- opcode = mir->dalvikInsn.opcode;
- SSARepresentation* ssa_rep = work_half->ssa_rep;
- work_half->ssa_rep = mir->ssa_rep;
- mir->ssa_rep = ssa_rep;
- work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
- if (bb->successor_block_list_type == kCatch) {
- ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(
- art::llvm::IntrinsicHelper::CatchTargets);
- ::llvm::Value* switch_key =
- irb_->CreateCall(intr, irb_->getInt32(mir->offset));
- // New basic block to use for work half
- ::llvm::BasicBlock* work_bb =
- ::llvm::BasicBlock::Create(*context_, "", func_);
- ::llvm::SwitchInst* sw =
- irb_->CreateSwitch(switch_key, work_bb, bb->successor_blocks.size());
- for (SuccessorBlockInfo *successor_block_info : bb->successor_blocks) {
- ::llvm::BasicBlock *target =
- GetLLVMBlock(successor_block_info->block);
- int type_index = successor_block_info->key;
- sw->addCase(irb_->getInt32(type_index), target);
- }
- llvm_bb = work_bb;
- irb_->SetInsertPoint(llvm_bb);
- }
- }
-
- if (IsPseudoMirOp(opcode)) {
- ConvertExtendedMIR(bb, mir, llvm_bb);
- continue;
- }
-
- bool not_handled = ConvertMIRNode(mir, bb, llvm_bb);
- if (not_handled) {
- Instruction::Code dalvik_opcode = static_cast<Instruction::Code>(opcode);
- LOG(WARNING) << StringPrintf("%#06x: Op %#x (%s) / Fmt %d not handled",
- mir->offset, opcode,
- Instruction::Name(dalvik_opcode),
- dalvik_format);
- }
- }
-
- if (bb->block_type == kEntryBlock) {
- entry_target_bb_ = GetLLVMBlock(bb->fall_through);
- } else if ((bb->fall_through != NullBasicBlockId) && !bb->terminated_by_return) {
- irb_->CreateBr(GetLLVMBlock(bb->fall_through));
- }
-
- return false;
-}
-
-char RemapShorty(char shorty_type) {
- /*
- * TODO: might want to revisit this. Dalvik registers are 32-bits wide,
- * and longs/doubles are represented as a pair of registers. When sub-word
- * arguments (and method results) are passed, they are extended to Dalvik
- * virtual register containers. Because llvm is picky about type consistency,
- * we must either cast the "real" type to 32-bit container multiple Dalvik
- * register types, or always use the expanded values.
- * Here, we're doing the latter. We map the shorty signature to container
- * types (which is valid so long as we always do a real expansion of passed
- * arguments and field loads).
- */
- switch (shorty_type) {
- case 'Z' : shorty_type = 'I'; break;
- case 'B' : shorty_type = 'I'; break;
- case 'S' : shorty_type = 'I'; break;
- case 'C' : shorty_type = 'I'; break;
- default: break;
- }
- return shorty_type;
-}
-
-::llvm::FunctionType* MirConverter::GetFunctionType() {
- // Get return type
- ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0]));
-
- // Get argument type
- std::vector< ::llvm::Type*> args_type;
-
- // method object
- args_type.push_back(irb_->getJMethodTy());
-
- // Do we have a "this"?
- if ((cu_->access_flags & kAccStatic) == 0) {
- args_type.push_back(irb_->getJObjectTy());
- }
-
- for (uint32_t i = 1; i < strlen(cu_->shorty); ++i) {
- args_type.push_back(irb_->getJType(RemapShorty(cu_->shorty[i])));
- }
-
- return ::llvm::FunctionType::get(ret_type, args_type, false);
-}
-
-bool MirConverter::CreateFunction() {
- ::llvm::FunctionType* func_type = GetFunctionType();
- if (func_type == NULL) {
- return false;
- }
-
- func_ = ::llvm::Function::Create(func_type,
- ::llvm::Function::InternalLinkage,
- symbol_, module_);
-
- ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
- ::llvm::Function::arg_iterator arg_end(func_->arg_end());
-
- arg_iter->setName("method");
- ++arg_iter;
-
- int start_sreg = mir_graph_->GetFirstInVR();
-
- for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
- arg_iter->setName(StringPrintf("v%i_0", start_sreg));
- start_sreg += mir_graph_->reg_location_[start_sreg].wide ? 2 : 1;
- }
-
- return true;
-}
-
-bool MirConverter::CreateLLVMBasicBlock(BasicBlock* bb) {
- // Skip the exit block
- if ((bb->block_type == kDead) ||(bb->block_type == kExitBlock)) {
- id_to_block_map_.Put(bb->id, NULL);
- } else {
- int offset = bb->start_offset;
- bool entry_block = (bb->block_type == kEntryBlock);
- ::llvm::BasicBlock* llvm_bb =
- ::llvm::BasicBlock::Create(*context_, entry_block ? "entry" :
- StringPrintf(kLabelFormat, bb->catch_entry ? kCatchBlock :
- kNormalBlock, offset, bb->id), func_);
- if (entry_block) {
- entry_bb_ = llvm_bb;
- placeholder_bb_ =
- ::llvm::BasicBlock::Create(*context_, "placeholder",
- func_);
- }
- id_to_block_map_.Put(bb->id, llvm_bb);
- }
- return false;
-}
-
-
-/*
- * Convert MIR to LLVM_IR
- * o For each ssa name, create LLVM named value. Type these
- * appropriately, and ignore high half of wide and double operands.
- * o For each MIR basic block, create an LLVM basic block.
- * o Iterate through the MIR a basic block at a time, setting arguments
- * to recovered ssa name.
- */
-void MirConverter::MethodMIR2Bitcode() {
- InitIR();
-
- // Create the function
- CreateFunction();
-
- // Create an LLVM basic block for each MIR block in dfs preorder
- PreOrderDfsIterator iter(mir_graph_);
- for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
- CreateLLVMBasicBlock(bb);
- }
-
- /*
- * Create an llvm named value for each MIR SSA name. Note: we'll use
- * placeholders for all non-argument values (because we haven't seen
- * the definition yet).
- */
- irb_->SetInsertPoint(placeholder_bb_);
- ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
- arg_iter++; /* Skip path method */
- for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
- ::llvm::Value* val;
- RegLocation rl_temp = mir_graph_->reg_location_[i];
- if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
- llvm_values_.push_back(0);
- } else if ((i < mir_graph_->GetFirstInVR()) ||
- (i >= (mir_graph_->GetFirstTempVR()))) {
- ::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
- irb_->getJLong(0) : irb_->getJInt(0);
- val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
- val->setName(mir_graph_->GetSSAName(i));
- llvm_values_.push_back(val);
- } else {
- // Recover previously-created argument values
- ::llvm::Value* arg_val = arg_iter++;
- llvm_values_.push_back(arg_val);
- }
- }
-
- PreOrderDfsIterator iter2(mir_graph_);
- for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
- BlockBitcodeConversion(bb);
- }
-
- /*
- * In a few rare cases of verification failure, the verifier will
- * replace one or more Dalvik opcodes with the special
- * throw-verification-failure opcode. This can leave the SSA graph
- * in an invalid state, as definitions may be lost, while uses retained.
- * To work around this problem, we insert placeholder definitions for
- * all Dalvik SSA regs in the "placeholder" block. Here, after
- * bitcode conversion is complete, we examine those placeholder definitions
- * and delete any with no references (which normally is all of them).
- *
- * If any definitions remain, we link the placeholder block into the
- * CFG. Otherwise, it is deleted.
- */
- for (::llvm::BasicBlock::iterator it = placeholder_bb_->begin(),
- it_end = placeholder_bb_->end(); it != it_end;) {
- ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(it++);
- DCHECK(inst != NULL);
- ::llvm::Value* val = ::llvm::dyn_cast< ::llvm::Value>(inst);
- DCHECK(val != NULL);
- if (val->getNumUses() == 0) {
- inst->eraseFromParent();
- }
- }
- SetDexOffset(0);
- if (placeholder_bb_->empty()) {
- placeholder_bb_->eraseFromParent();
- } else {
- irb_->SetInsertPoint(placeholder_bb_);
- irb_->CreateBr(entry_target_bb_);
- entry_target_bb_ = placeholder_bb_;
- }
- irb_->SetInsertPoint(entry_bb_);
- irb_->CreateBr(entry_target_bb_);
-
- if (cu_->enable_debug & (1 << kDebugVerifyBitcode)) {
- if (::llvm::verifyFunction(*func_, ::llvm::PrintMessageAction)) {
- LOG(INFO) << "Bitcode verification FAILED for "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file)
- << " of size " << mir_graph_->GetNumDalvikInsns();
- cu_->enable_debug |= (1 << kDebugDumpBitcodeFile);
- }
- }
-
- if (cu_->enable_debug & (1 << kDebugDumpBitcodeFile)) {
- // Write bitcode to file
- std::string errmsg;
- std::string fname(PrettyMethod(cu_->method_idx, *cu_->dex_file));
- mir_graph_->ReplaceSpecialChars(fname);
- // TODO: make configurable change naming mechanism to avoid fname length issues.
- fname = StringPrintf("/sdcard/Bitcode/%s.bc", fname.c_str());
-
- if (fname.size() > 240) {
- LOG(INFO) << "Warning: bitcode filename too long. Truncated.";
- fname.resize(240);
- }
-
- ::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
- new ::llvm::tool_output_file(fname.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
-
- if (!errmsg.empty()) {
- LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
- }
-
- ::llvm::WriteBitcodeToFile(module_, out_file->os());
- out_file->keep();
- }
-}
-
-Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena,
- llvm::LlvmCompilationUnit* const llvm_compilation_unit) {
- return new MirConverter(cu, mir_graph, arena, llvm_compilation_unit);
-}
-
-} // namespace art
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
deleted file mode 100644
index bc4f5c4100..0000000000
--- a/compiler/dex/portable/mir_to_gbc.h
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
-#define ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
-
-#include <llvm/ADT/ArrayRef.h>
-#include <llvm/IR/BasicBlock.h>
-#include <llvm/IR/IRBuilder.h>
-#include <llvm/IR/LLVMContext.h>
-#include <llvm/IR/Module.h>
-
-#include "invoke_type.h"
-#include "compiled_method.h"
-#include "dex/compiler_enums.h"
-#include "dex/compiler_ir.h"
-#include "dex/backend.h"
-#include "llvm/intrinsic_helper.h"
-#include "llvm/llvm_compilation_unit.h"
-#include "safe_map.h"
-#include "utils/arena_containers.h"
-
-namespace llvm {
- class Module;
- class LLVMContext;
-}
-
-namespace art {
-
-namespace llvm {
- class IntrinsicHelper;
- class IRBuilder;
-}
-
-class LLVMInfo {
- public:
- LLVMInfo();
- ~LLVMInfo();
-
- ::llvm::LLVMContext* GetLLVMContext() {
- return llvm_context_.get();
- }
-
- ::llvm::Module* GetLLVMModule() {
- return llvm_module_;
- }
-
- art::llvm::IntrinsicHelper* GetIntrinsicHelper() {
- return intrinsic_helper_.get();
- }
-
- art::llvm::IRBuilder* GetIRBuilder() {
- return ir_builder_.get();
- }
-
- private:
- std::unique_ptr< ::llvm::LLVMContext> llvm_context_;
- ::llvm::Module* llvm_module_; // Managed by context_.
- std::unique_ptr<art::llvm::IntrinsicHelper> intrinsic_helper_;
- std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
-};
-
-class BasicBlock;
-struct CallInfo;
-struct CompilationUnit;
-struct MIR;
-struct RegLocation;
-struct RegisterInfo;
-class MIRGraph;
-
-// Target-specific initialization.
-Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena,
- llvm::LlvmCompilationUnit* const llvm_compilation_unit);
-
-class MirConverter : public Backend {
- public:
- // TODO: flesh out and integrate into new world order.
- MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
- llvm::LlvmCompilationUnit* llvm_compilation_unit)
- : Backend(arena),
- cu_(cu),
- mir_graph_(mir_graph),
- llvm_compilation_unit_(llvm_compilation_unit),
- llvm_info_(llvm_compilation_unit->GetQuickContext()),
- symbol_(llvm_compilation_unit->GetDexCompilationUnit()->GetSymbol()),
- context_(NULL),
- module_(NULL),
- func_(NULL),
- intrinsic_helper_(NULL),
- irb_(NULL),
- placeholder_bb_(NULL),
- entry_bb_(NULL),
- entry_target_bb_(NULL),
- llvm_values_(arena->Adapter()),
- temp_name_(0),
- current_dalvik_offset_(0) {
- llvm_values_.reserve(mir_graph->GetNumSSARegs());
- if (kIsDebugBuild) {
- cu->enable_debug |= (1 << kDebugVerifyBitcode);
- }
- }
-
- void Materialize() {
- MethodMIR2Bitcode();
- }
-
- CompiledMethod* GetCompiledMethod() {
- return NULL;
- }
-
- private:
- ::llvm::BasicBlock* GetLLVMBlock(int id);
- ::llvm::Value* GetLLVMValue(int s_reg);
- void SetVregOnValue(::llvm::Value* val, int s_reg);
- void DefineValueOnly(::llvm::Value* val, int s_reg);
- void DefineValue(::llvm::Value* val, int s_reg);
- ::llvm::Type* LlvmTypeFromLocRec(RegLocation loc);
- void InitIR();
- ::llvm::BasicBlock* FindCaseTarget(uint32_t vaddr);
- void ConvertPackedSwitch(BasicBlock* bb, MIR* mir, int32_t table_offset,
- RegLocation rl_src);
- void ConvertSparseSwitch(BasicBlock* bb, MIR* mir, int32_t table_offset,
- RegLocation rl_src);
- void ConvertSget(int32_t field_index,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
- void ConvertSput(int32_t field_index,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
- void ConvertFillArrayData(int32_t offset, RegLocation rl_array);
- ::llvm::Value* EmitConst(::llvm::ArrayRef< ::llvm::Value*> src,
- RegLocation loc);
- void EmitPopShadowFrame();
- ::llvm::Value* EmitCopy(::llvm::ArrayRef< ::llvm::Value*> src,
- RegLocation loc);
- void ConvertMoveException(RegLocation rl_dest);
- void ConvertThrow(RegLocation rl_src);
- void ConvertMonitorEnterExit(int opt_flags,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_src);
- void ConvertArrayLength(int opt_flags, RegLocation rl_dest,
- RegLocation rl_src);
- void EmitSuspendCheck();
- ::llvm::Value* ConvertCompare(ConditionCode cc,
- ::llvm::Value* src1, ::llvm::Value* src2);
- void ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
- RegLocation rl_src1, RegLocation rl_src2);
- void ConvertCompareZeroAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
- RegLocation rl_src1);
- ::llvm::Value* GenDivModOp(bool is_div, bool is_long, ::llvm::Value* src1,
- ::llvm::Value* src2);
- ::llvm::Value* GenArithOp(OpKind op, bool is_long, ::llvm::Value* src1,
- ::llvm::Value* src2);
- void ConvertFPArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void ConvertShift(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void ConvertShiftLit(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src, int shift_amount);
- void ConvertArithOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void ConvertArithOpLit(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
- int32_t imm);
- void ConvertInvoke(BasicBlock* bb, MIR* mir, InvokeType invoke_type,
- bool is_range, bool is_filled_new_array);
- void ConvertConstObject(uint32_t idx,
- art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
- void ConvertCheckCast(uint32_t type_idx, RegLocation rl_src);
- void ConvertNewInstance(uint32_t type_idx, RegLocation rl_dest);
- void ConvertNewArray(uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
- void ConvertAget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_array, RegLocation rl_index);
- void ConvertAput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_src, RegLocation rl_array, RegLocation rl_index);
- void ConvertIget(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_obj, int field_index);
- void ConvertIput(int opt_flags, art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_src, RegLocation rl_obj, int field_index);
- void ConvertInstanceOf(uint32_t type_idx, RegLocation rl_dest,
- RegLocation rl_src);
- void ConvertIntToLong(RegLocation rl_dest, RegLocation rl_src);
- void ConvertLongToInt(RegLocation rl_dest, RegLocation rl_src);
- void ConvertFloatToDouble(RegLocation rl_dest, RegLocation rl_src);
- void ConvertDoubleToFloat(RegLocation rl_dest, RegLocation rl_src);
- void ConvertWideComparison(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void ConvertIntNarrowing(RegLocation rl_dest, RegLocation rl_src,
- art::llvm::IntrinsicHelper::IntrinsicId id);
- void ConvertNeg(RegLocation rl_dest, RegLocation rl_src);
- void ConvertIntToFP(::llvm::Type* ty, RegLocation rl_dest, RegLocation rl_src);
- void ConvertFPToInt(art::llvm::IntrinsicHelper::IntrinsicId id,
- RegLocation rl_dest, RegLocation rl_src);
- void ConvertNegFP(RegLocation rl_dest, RegLocation rl_src);
- void ConvertNot(RegLocation rl_dest, RegLocation rl_src);
- void EmitConstructorBarrier();
- bool ConvertMIRNode(MIR* mir, BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
- void SetDexOffset(int32_t offset);
- void SetMethodInfo();
- void HandlePhiNodes(BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
- void ConvertExtendedMIR(BasicBlock* bb, MIR* mir, ::llvm::BasicBlock* llvm_bb);
- bool BlockBitcodeConversion(BasicBlock* bb);
- ::llvm::FunctionType* GetFunctionType();
- bool CreateFunction();
- bool CreateLLVMBasicBlock(BasicBlock* bb);
- void MethodMIR2Bitcode();
-
- CompilationUnit* cu_;
- MIRGraph* mir_graph_;
- llvm::LlvmCompilationUnit* const llvm_compilation_unit_;
- LLVMInfo* llvm_info_;
- std::string symbol_;
- ::llvm::LLVMContext* context_;
- ::llvm::Module* module_;
- ::llvm::Function* func_;
- art::llvm::IntrinsicHelper* intrinsic_helper_;
- art::llvm::IRBuilder* irb_;
- ::llvm::BasicBlock* placeholder_bb_;
- ::llvm::BasicBlock* entry_bb_;
- ::llvm::BasicBlock* entry_target_bb_;
- std::string bitcode_filename_;
- ArenaVector< ::llvm::Value*> llvm_values_;
- int32_t temp_name_;
- SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map_; // block id -> llvm bb.
- int current_dalvik_offset_;
-}; // Class MirConverter
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_PORTABLE_MIR_TO_GBC_H_
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index b9d9a1111d..5d09ae1cf3 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -481,10 +481,10 @@ enum ArmOpcode {
kThumb2LsrRRR, // lsr [111110100010] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
kThumb2AsrRRR, // asr [111110100100] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
kThumb2RorRRR, // ror [111110100110] rn[19..16] [1111] rd[11..8] [0000] rm[3..0].
- kThumb2LslRRI5, // lsl [11101010010011110] imm[14.12] rd[11..8] [00] rm[3..0].
- kThumb2LsrRRI5, // lsr [11101010010011110] imm[14.12] rd[11..8] [01] rm[3..0].
- kThumb2AsrRRI5, // asr [11101010010011110] imm[14.12] rd[11..8] [10] rm[3..0].
- kThumb2RorRRI5, // ror [11101010010011110] imm[14.12] rd[11..8] [11] rm[3..0].
+ kThumb2LslRRI5, // lsl [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [00] rm[3..0].
+ kThumb2LsrRRI5, // lsr [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [01] rm[3..0].
+ kThumb2AsrRRI5, // asr [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [10] rm[3..0].
+ kThumb2RorRRI5, // ror [11101010010011110] imm3[14..12] rd[11..8] imm2[7..6] [11] rm[3..0].
kThumb2BicRRI8M, // bic rd, rn, #<const> [11110] i [000010] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2AndRRI8M, // and rd, rn, #<const> [11110] i [000000] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
kThumb2OrrRRI8M, // orr rd, rn, #<const> [11110] i [000100] rn[19..16] [0] imm3[14..12] rd[11..8] imm8[7..0].
@@ -512,7 +512,8 @@ enum ArmOpcode {
kThumb2Vnegs, // vneg.f32 [111011101] D [110000] rd[15-12] [1010110] M [0] vm[3-0].
kThumb2Vmovs_IMM8, // vmov.f32 [111011101] D [11] imm4h[19-16] vd[15-12] [10100000] imm4l[3-0].
kThumb2Vmovd_IMM8, // vmov.f64 [111011101] D [11] imm4h[19-16] vd[15-12] [10110000] imm4l[3-0].
- kThumb2Mla, // mla [111110110000] rn[19-16] ra[15-12] rd[7-4] [0000] rm[3-0].
+ kThumb2Mla, // mla [111110110000] rn[19-16] ra[15-12] rd[11-8] [0000] rm[3-0].
+ kThumb2Mls, // mls [111110110000] rn[19-16] ra[15-12] rd[11-8] [0001] rm[3-0].
kThumb2Umull, // umull [111110111010] rn[19-16], rdlo[15-12] rdhi[11-8] [0000] rm[3-0].
kThumb2Ldrex, // ldrex [111010000101] rn[19-16] rt[15-12] [1111] imm8[7-0].
kThumb2Ldrexd, // ldrexd [111010001101] rn[19-16] rt[15-12] rt2[11-8] [11111111].
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index de93e2602b..65fb3cd393 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -896,6 +896,10 @@ const ArmEncodingMap ArmMir2Lir::EncodingMap[kArmLast] = {
kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE123,
"mla", "!0C, !1C, !2C, !3C", 4, kFixupNone),
+ ENCODING_MAP(kThumb2Mls, 0xfb000010,
+ kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16, kFmtBitBlt, 3, 0,
+ kFmtBitBlt, 15, 12, IS_QUAD_OP | REG_DEF0_USE123,
+ "mls", "!0C, !1C, !2C, !3C", 4, kFixupNone),
ENCODING_MAP(kThumb2Umull, 0xfba00000,
kFmtBitBlt, 15, 12, kFmtBitBlt, 11, 8, kFmtBitBlt, 19, 16,
kFmtBitBlt, 3, 0,
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 0ae7ee3560..fa8dfe326f 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -182,6 +182,8 @@ class ArmMir2Lir FINAL : public Mir2Lir {
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
+ void GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target);
@@ -259,6 +261,8 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
size_t GetInstructionOffset(LIR* lir);
+ void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
+
private:
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 1a7b4395d8..fe1d12610a 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -1075,6 +1075,17 @@ LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
return NewLIR3(kThumb2Vstms, r_base.GetReg(), rs_fr0.GetReg(), count);
}
+void ArmMir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub) {
+ rl_src1 = LoadValue(rl_src1, kCoreReg);
+ rl_src2 = LoadValue(rl_src2, kCoreReg);
+ rl_src3 = LoadValue(rl_src3, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR4(is_sub ? kThumb2Mls : kThumb2Mla, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7190a49c26..d3743531fb 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -948,4 +948,30 @@ int ArmMir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
return count;
}
+void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
+ DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
+ RegLocation rl_src[3];
+ RegLocation rl_dest = mir_graph_->GetBadLoc();
+ rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
+ switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
+ case kMirOpMaddInt:
+ rl_dest = mir_graph_->GetDest(mir);
+ rl_src[0] = mir_graph_->GetSrc(mir, 0);
+ rl_src[1] = mir_graph_->GetSrc(mir, 1);
+ rl_src[2]= mir_graph_->GetSrc(mir, 2);
+ GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], false);
+ break;
+ case kMirOpMsubInt:
+ rl_dest = mir_graph_->GetDest(mir);
+ rl_src[0] = mir_graph_->GetSrc(mir, 0);
+ rl_src[1] = mir_graph_->GetSrc(mir, 1);
+ rl_src[2]= mir_graph_->GetSrc(mir, 2);
+ GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2], true);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << mir->dalvikInsn.opcode;
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index f8a7310c20..943c5c18d4 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -312,6 +312,7 @@ enum A64Opcode {
kA64Lsl3rrr, // lsl [s0011010110] rm[20-16] [001000] rn[9-5] rd[4-0].
kA64Lsr3rrd, // lsr alias of "ubfm arg0, arg1, arg2, #{31/63}".
kA64Lsr3rrr, // lsr [s0011010110] rm[20-16] [001001] rn[9-5] rd[4-0].
+ kA64Madd4rrrr, // madd[s0011011000] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
kA64Movk3rdM, // mov [010100101] hw[22-21] imm_16[20-5] rd[4-0].
kA64Movn3rdM, // mov [000100101] hw[22-21] imm_16[20-5] rd[4-0].
kA64Movz3rdM, // mov [011100101] hw[22-21] imm_16[20-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index cab11cc4a5..d45ec497f0 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -445,6 +445,10 @@ const A64EncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"lsr", "!0r, !1r, !2r", kFixupNone),
+ ENCODING_MAP(WIDE(kA64Madd4rrrr), SF_VARIANTS(0x1b000000),
+ kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
+ kFmtRegR, 14, 10, IS_QUAD_OP | REG_DEF0_USE123 | NEEDS_FIXUP,
+ "madd", "!0r, !1r, !2r, !3r", kFixupA53Erratum835769),
ENCODING_MAP(WIDE(kA64Movk3rdM), SF_VARIANTS(0x72800000),
kFmtRegR, 4, 0, kFmtBitBlt, 20, 5, kFmtBitBlt, 22, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE0,
@@ -840,6 +844,20 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
// are better set directly from the code (they will require no more than 2 instructions).
#define ALIGNED_DATA_OFFSET(offset) (((offset) + 0x7) & ~0x7)
+/*
+ * Get the LIR which emits the instruction preceding the given LIR.
+ * Returns nullptr, if no previous emitting insn found.
+ */
+static LIR* GetPrevEmittingLIR(LIR* lir) {
+ DCHECK(lir != nullptr);
+ LIR* prev_lir = lir->prev;
+ while ((prev_lir != nullptr) &&
+ (prev_lir->flags.is_nop || Mir2Lir::IsPseudoLirOp(prev_lir->opcode))) {
+ prev_lir = prev_lir->prev;
+ }
+ return prev_lir;
+}
+
// Assemble the LIR into binary instruction format.
void Arm64Mir2Lir::AssembleLIR() {
LIR* lir;
@@ -1002,7 +1020,11 @@ void Arm64Mir2Lir::AssembleLIR() {
->NeedFixCortexA53_835769()) {
// Check that this is a 64-bit multiply-accumulate.
if (IS_WIDE(lir->opcode)) {
- uint64_t prev_insn_flags = EncodingMap[UNWIDE(lir->prev->opcode)].flags;
+ LIR* prev_insn = GetPrevEmittingLIR(lir);
+ if (prev_insn == nullptr) {
+ break;
+ }
+ uint64_t prev_insn_flags = EncodingMap[UNWIDE(prev_insn->opcode)].flags;
// Check that the instruction preceding the multiply-accumulate is a load or store.
if ((prev_insn_flags & IS_LOAD) != 0 || (prev_insn_flags & IS_STORE) != 0) {
// insert a NOP between the load/store and the multiply-accumulate.
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 766ac23ef9..55866e2eb8 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -193,6 +193,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+ void GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub);
+ void GenMaddMsubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub);
// Required for target - single operation generators.
LIR* OpUnconditionalBranch(LIR* target) OVERRIDE;
@@ -226,6 +230,8 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
bool InexpensiveConstantLong(int64_t value) OVERRIDE;
bool InexpensiveConstantDouble(int64_t value) OVERRIDE;
+ void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
+
bool WideGPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 57e67d534b..5ac2aa080d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -949,10 +949,33 @@ LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
UNREACHABLE();
}
+void Arm64Mir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub) {
+ rl_src1 = LoadValue(rl_src1, kCoreReg);
+ rl_src2 = LoadValue(rl_src2, kCoreReg);
+ rl_src3 = LoadValue(rl_src3, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR4(is_sub ? kA64Msub4rrrr : kA64Madd4rrrr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Arm64Mir2Lir::GenMaddMsubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ RegLocation rl_src3, bool is_sub) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ rl_src3 = LoadValueWide(rl_src3, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR4(is_sub ? WIDE(kA64Msub4rrrr) : WIDE(kA64Madd4rrrr), rl_result.reg.GetReg(),
+ rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), rl_src3.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
+ OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
+ EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
@@ -1686,7 +1709,8 @@ bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
RegLocation rl_src_i = info->args[0];
RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info); // result reg
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
+ RegLocation rl_i = IsWide(size) ?
+ LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
return true;
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index e7fa8ed475..030c5ed2f4 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -849,4 +849,35 @@ int Arm64Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* /*info*/, int /*first*/, int c
return count;
}
+void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
+ DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
+ RegLocation rl_src[3];
+ RegLocation rl_dest = mir_graph_->GetBadLoc();
+ rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
+ ExtendedMIROpcode opcode = static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode);
+ switch (opcode) {
+ case kMirOpMaddInt:
+ case kMirOpMsubInt:
+ rl_dest = mir_graph_->GetDest(mir);
+ rl_src[0] = mir_graph_->GetSrc(mir, 0);
+ rl_src[1] = mir_graph_->GetSrc(mir, 1);
+ rl_src[2]= mir_graph_->GetSrc(mir, 2);
+ GenMaddMsubInt(rl_dest, rl_src[0], rl_src[1], rl_src[2],
+ (opcode == kMirOpMsubInt) ? true : false);
+ break;
+ case kMirOpMaddLong:
+ case kMirOpMsubLong:
+ rl_dest = mir_graph_->GetDestWide(mir);
+ rl_src[0] = mir_graph_->GetSrcWide(mir, 0);
+ rl_src[1] = mir_graph_->GetSrcWide(mir, 2);
+ rl_src[2] = mir_graph_->GetSrcWide(mir, 4);
+ GenMaddMsubLong(rl_dest, rl_src[0], rl_src[1], rl_src[2],
+ (opcode == kMirOpMsubLong) ? true : false);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << static_cast<int>(opcode);
+ }
+}
+
} // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 774176ebb1..50014b05b1 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -2163,18 +2163,15 @@ class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTest(int opt_flags) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
+ return;
+ }
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- return;
- }
FlushAllRegs();
LIR* branch = OpTestSuspend(NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
AddSlowPath(new (arena_) SuspendCheckSlowPath(this, branch, cont));
} else {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- return;
- }
FlushAllRegs(); // TODO: needed?
LIR* inst = CheckSuspendUsingLoad();
MarkSafepointPC(inst);
@@ -2183,11 +2180,11 @@ void Mir2Lir::GenSuspendTest(int opt_flags) {
/* Check if we need to check for pending suspend request */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
+ if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK) != 0) {
+ OpUnconditionalBranch(target);
+ return;
+ }
if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitSuspendChecks()) {
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- OpUnconditionalBranch(target);
- return;
- }
OpTestSuspend(target);
FlushAllRegs();
LIR* branch = OpUnconditionalBranch(nullptr);
@@ -2195,10 +2192,6 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
} else {
// For the implicit suspend check, just perform the trigger
// load and branch to the target.
- if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
- OpUnconditionalBranch(target);
- return;
- }
FlushAllRegs();
LIR* inst = CheckSuspendUsingLoad();
MarkSafepointPC(inst);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index bd88091add..524ee21e63 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -549,8 +549,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32:
- if (mir_graph_->IsBackedge(bb, bb->taken) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken))) {
+ if (mir_graph_->IsBackEdge(bb, bb->taken)) {
GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
} else {
OpUnconditionalBranch(&label_list[bb->taken]);
@@ -582,12 +581,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- LIR* taken = &label_list[bb->taken];
- if (mir_graph_->IsBackwardsBranch(bb) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
- !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
GenSuspendTest(opt_flags);
}
+ LIR* taken = &label_list[bb->taken];
GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
break;
}
@@ -597,12 +594,10 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- LIR* taken = &label_list[bb->taken];
- if (mir_graph_->IsBackwardsBranch(bb) &&
- (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
- !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
GenSuspendTest(opt_flags);
}
+ LIR* taken = &label_list[bb->taken];
GenCompareZeroAndBranch(opcode, rl_src[0], taken);
break;
}
@@ -771,69 +766,37 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::INVOKE_STATIC_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
- if (!kLeafOptimization) {
- // If the invocation is not inlined, we can assume there is already a
- // suspend check at the return site
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_STATIC:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_DIRECT:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_DIRECT_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_VIRTUAL:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_SUPER:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_SUPER_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_INTERFACE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::INVOKE_INTERFACE_RANGE:
GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
- if (!kLeafOptimization) {
- mir_graph_->AppendGenSuspendTestList(bb);
- }
break;
case Instruction::NEG_INT:
@@ -1034,18 +997,33 @@ void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
break;
}
case kMirOpFusedCmplFloat:
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
+ GenSuspendTest(mir->optimization_flags);
+ }
GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
break;
case kMirOpFusedCmpgFloat:
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
+ GenSuspendTest(mir->optimization_flags);
+ }
GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
break;
case kMirOpFusedCmplDouble:
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
+ GenSuspendTest(mir->optimization_flags);
+ }
GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
break;
case kMirOpFusedCmpgDouble:
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
+ GenSuspendTest(mir->optimization_flags);
+ }
GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
break;
case kMirOpFusedCmpLong:
+ if (mir_graph_->IsBackEdge(bb, bb->taken) || mir_graph_->IsBackEdge(bb, bb->fall_through)) {
+ GenSuspendTest(mir->optimization_flags);
+ }
GenFusedLongCmpBranch(bb, mir);
break;
case kMirOpSelect:
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index dd0933018f..a2b85ffb6c 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -843,8 +843,8 @@ class Mir2Lir : public Backend {
virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2, int flags);
void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
- virtual void GenSuspendTest(int opt_flags);
- virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
+ void GenSuspendTest(int opt_flags);
+ void GenSuspendTestAndBranch(int opt_flags, LIR* target);
// This will be overridden by x86 implementation.
virtual void GenConstWide(RegLocation rl_dest, int64_t value);
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8d4cb3c5e9..c14e22e090 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -19,6 +19,7 @@
#include <cstdint>
#include "compiler.h"
+#include "dex_file-inl.h"
#include "dex/frontend.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir.h"
@@ -392,10 +393,10 @@ static int kAllOpcodes[] = {
Instruction::IPUT_BYTE_QUICK,
Instruction::IPUT_CHAR_QUICK,
Instruction::IPUT_SHORT_QUICK,
- Instruction::UNUSED_EF,
- Instruction::UNUSED_F0,
- Instruction::UNUSED_F1,
- Instruction::UNUSED_F2,
+ Instruction::IGET_BOOLEAN_QUICK,
+ Instruction::IGET_BYTE_QUICK,
+ Instruction::IGET_CHAR_QUICK,
+ Instruction::IGET_SHORT_QUICK,
Instruction::UNUSED_F3,
Instruction::UNUSED_F4,
Instruction::UNUSED_F5,
@@ -588,17 +589,6 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) {
- return method;
- }
-
// TODO: check method fingerprint here to determine appropriate backend type. Until then, use
// build default.
CompilerDriver* driver = GetCompilerDriver();
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index b80346286c..7cd431e26c 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -197,12 +197,6 @@ void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
dom_post_order_traversal_.push_back(curr_bb->id);
}
work_stack.pop_back();
-
- /* hacky loop detection */
- if ((curr_bb->taken != NullBasicBlockId) && curr_bb->dominators->IsBitSet(curr_bb->taken)) {
- curr_bb->nesting_depth++;
- attributes_ |= METHOD_HAS_LOOP;
- }
}
}
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 932a532e56..60d24068b1 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,18 +106,8 @@ bool VerificationResults::IsClassRejected(ClassReference ref) {
return (rejected_classes_.find(ref) != rejected_classes_.end());
}
-bool VerificationResults::IsCandidateForCompilation(MethodReference& method_ref,
+bool VerificationResults::IsCandidateForCompilation(MethodReference&,
const uint32_t access_flags) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = compiler_options_->GetSeaIrMode();
- use_sea = use_sea && (std::string::npos != PrettyMethod(
- method_ref.dex_method_index, *(method_ref.dex_file)).find("fibonacci"));
- if (use_sea) {
- return true;
- }
-#else
- UNUSED(method_ref);
-#endif
if (!compiler_options_->IsCompilationEnabled()) {
return false;
}
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 17328c4a5b..d684bc9006 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -82,13 +82,13 @@ bool VerifiedMethod::GenerateGcMap(verifier::MethodVerifier* method_verifier) {
size_t num_entries, ref_bitmap_bits, pc_bits;
ComputeGcMapSizes(method_verifier, &num_entries, &ref_bitmap_bits, &pc_bits);
// There's a single byte to encode the size of each bitmap.
- if (ref_bitmap_bits >= (8 /* bits per byte */ * 8192 /* 13-bit size */ )) {
+ if (ref_bitmap_bits >= kBitsPerByte * 8192 /* 13-bit size */) {
// TODO: either a better GC map format or per method failures
method_verifier->Fail(verifier::VERIFY_ERROR_BAD_CLASS_HARD)
<< "Cannot encode GC map for method with " << ref_bitmap_bits << " registers";
return false;
}
- size_t ref_bitmap_bytes = (ref_bitmap_bits + 7) / 8;
+ size_t ref_bitmap_bytes = RoundUp(ref_bitmap_bits, kBitsPerByte) / kBitsPerByte;
// There are 2 bytes to encode the number of entries.
if (num_entries >= 65536) {
// TODO: Either a better GC map format or per method failures.
@@ -98,10 +98,10 @@ bool VerifiedMethod::GenerateGcMap(verifier::MethodVerifier* method_verifier) {
}
size_t pc_bytes;
verifier::RegisterMapFormat format;
- if (pc_bits <= 8) {
+ if (pc_bits <= kBitsPerByte) {
format = verifier::kRegMapFormatCompact8;
pc_bytes = 1;
- } else if (pc_bits <= 16) {
+ } else if (pc_bits <= kBitsPerByte * 2) {
format = verifier::kRegMapFormatCompact16;
pc_bytes = 2;
} else {
@@ -152,10 +152,10 @@ void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier,
verifier::RegisterLine* line = method_verifier->GetRegLine(i);
for (size_t j = 0; j < code_item->registers_size_; j++) {
if (line->GetRegisterType(method_verifier, j).IsNonZeroReferenceTypes()) {
- DCHECK_LT(j / 8, map.RegWidth());
- DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1);
- } else if ((j / 8) < map.RegWidth()) {
- DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 0);
+ DCHECK_LT(j / kBitsPerByte, map.RegWidth());
+ DCHECK_EQ((reg_bitmap[j / kBitsPerByte] >> (j % kBitsPerByte)) & 1, 1);
+ } else if ((j / kBitsPerByte) < map.RegWidth()) {
+ DCHECK_EQ((reg_bitmap[j / kBitsPerByte] >> (j % kBitsPerByte)) & 1, 0);
} else {
// If a register doesn't contain a reference then the bitmap may be shorter than the line.
}
@@ -190,6 +190,31 @@ void VerifiedMethod::ComputeGcMapSizes(verifier::MethodVerifier* method_verifier
*log2_max_gc_pc = i;
}
+void VerifiedMethod::GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier) {
+ if (method_verifier->HasFailures()) {
+ return;
+ }
+ const DexFile::CodeItem* code_item = method_verifier->CodeItem();
+ const uint16_t* insns = code_item->insns_;
+ const Instruction* inst = Instruction::At(insns);
+ const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
+ for (; inst < end; inst = inst->Next()) {
+ const bool is_virtual_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK;
+ const bool is_range_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK;
+ if (is_virtual_quick || is_range_quick) {
+ uint32_t dex_pc = inst->GetDexPc(insns);
+ verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
+ mirror::ArtMethod* method = method_verifier->GetQuickInvokedMethod(inst, line,
+ is_range_quick);
+ CHECK(method != nullptr);
+ // The verifier must know what the type of the object was or else we would have gotten a
+      // failure. Put the dex method index in the dequicken map since we need this to get the
+      // number of arguments in the compiler.
+ dequicken_map_.Put(dex_pc, method->ToMethodReference());
+ }
+ }
+}
+
void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) {
// It is risky to rely on reg_types for sharpening in cases of soft
// verification, we might end up sharpening to a wrong implementation. Just abort.
@@ -203,10 +228,10 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_);
for (; inst < end; inst = inst->Next()) {
- bool is_virtual = (inst->Opcode() == Instruction::INVOKE_VIRTUAL) ||
- (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE);
- bool is_interface = (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
- (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ const bool is_virtual = inst->Opcode() == Instruction::INVOKE_VIRTUAL ||
+ inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE;
+ const bool is_interface = inst->Opcode() == Instruction::INVOKE_INTERFACE ||
+ inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
if (!is_interface && !is_virtual) {
continue;
@@ -214,8 +239,8 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
// Get reg type for register holding the reference to the object that will be dispatched upon.
uint32_t dex_pc = inst->GetDexPc(insns);
verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
- bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
- (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
+ const bool is_range = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE ||
+ inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE;
const verifier::RegType&
reg_type(line->GetRegisterType(method_verifier,
is_range ? inst->VRegC_3rc() : inst->VRegC_35c()));
@@ -241,14 +266,14 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
continue;
}
// Find the concrete method.
- mirror::ArtMethod* concrete_method = NULL;
+ mirror::ArtMethod* concrete_method = nullptr;
if (is_interface) {
concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface(abstract_method);
}
if (is_virtual) {
concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual(abstract_method);
}
- if (concrete_method == NULL || concrete_method->IsAbstract()) {
+ if (concrete_method == nullptr || concrete_method->IsAbstract()) {
// In cases where concrete_method is not found, or is abstract, continue to the next invoke.
continue;
}
@@ -256,10 +281,7 @@ void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier
concrete_method->GetDeclaringClass()->IsFinal()) {
// If we knew exactly the class being dispatched upon, or if the target method cannot be
// overridden record the target to be used in the compiler driver.
- MethodReference concrete_ref(
- concrete_method->GetDeclaringClass()->GetDexCache()->GetDexFile(),
- concrete_method->GetDexMethodIndex());
- devirt_map_.Put(dex_pc, concrete_ref);
+ devirt_map_.Put(dex_pc, concrete_method->ToMethodReference());
}
}
}
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 257e70ce93..fe9dfd1cb0 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -85,12 +85,19 @@ class VerifiedMethod {
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Generate dequickening map into dequicken_map_.
+ void GenerateDeQuickenMap(verifier::MethodVerifier* method_verifier)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Generate safe case set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
std::vector<uint8_t> dex_gc_map_;
DevirtualizationMap devirt_map_;
+  // The dequicken map is required so that the compiler can compile quickened invokes. The quicken
+  // map enables us to get the dex method index so that we can get the required argument count.
+ DevirtualizationMap dequicken_map_;
SafeCastSet safe_cast_set_;
};
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e4274712d4..051b310f89 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -371,8 +371,6 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
- CHECK_PTHREAD_CALL(pthread_key_create, (&tls_key_, nullptr), "compiler tls key");
-
dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
compiler_->Init();
@@ -432,20 +430,9 @@ CompilerDriver::~CompilerDriver() {
MutexLock mu(self, compiled_methods_lock_);
STLDeleteValues(&compiled_methods_);
}
- CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
compiler_->UnInit();
}
-CompilerTls* CompilerDriver::GetTls() {
- // Lazily create thread-local storage
- CompilerTls* res = static_cast<CompilerTls*>(pthread_getspecific(tls_key_));
- if (res == nullptr) {
- res = compiler_->CreateNewCompilerTls();
- CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls");
- }
- return res;
-}
-
#define CREATE_TRAMPOLINE(type, abi, offset) \
if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
@@ -467,18 +454,6 @@ const std::vector<uint8_t>* CompilerDriver::CreateJniDlsymLookup() const {
CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup)
}
-const std::vector<uint8_t>* CompilerDriver::CreatePortableImtConflictTrampoline() const {
- CREATE_TRAMPOLINE(PORTABLE, kPortableAbi, pPortableImtConflictTrampoline)
-}
-
-const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
- CREATE_TRAMPOLINE(PORTABLE, kPortableAbi, pPortableResolutionTrampoline)
-}
-
-const std::vector<uint8_t>* CompilerDriver::CreatePortableToInterpreterBridge() const {
- CREATE_TRAMPOLINE(PORTABLE, kPortableAbi, pPortableToInterpreterBridge)
-}
-
const std::vector<uint8_t>* CompilerDriver::CreateQuickGenericJniTrampoline() const {
CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickGenericJniTrampoline)
}
@@ -1283,18 +1258,11 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType
// TODO This is somewhat hacky. We should refactor all of this invoke codepath.
const bool force_relocations = (compiling_boot ||
GetCompilerOptions().GetIncludePatchInformation());
- if (compiler_->IsPortable()) {
- if (sharp_type != kStatic && sharp_type != kDirect) {
- return;
- }
- use_dex_cache = true;
- } else {
- if (sharp_type != kStatic && sharp_type != kDirect) {
- return;
- }
- // TODO: support patching on all architectures.
- use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
+ if (sharp_type != kStatic && sharp_type != kDirect) {
+ return;
}
+ // TODO: support patching on all architectures.
+ use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
bool method_code_in_boot = (method->GetDeclaringClass()->GetClassLoader() == nullptr);
if (!use_dex_cache) {
if (!method_code_in_boot) {
@@ -1405,8 +1373,7 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui
if (resolved_method != nullptr) {
*vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type);
- if (enable_devirtualization) {
- DCHECK(mUnit->GetVerifiedMethod() != nullptr);
+ if (enable_devirtualization && mUnit->GetVerifiedMethod() != nullptr) {
const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc);
stats_flags = IsFastInvoke(
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 615e0d0db4..edc6468a85 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -66,8 +66,6 @@ enum EntryPointCallingConvention {
kInterpreterAbi,
// ABI of calls to a method's native code, only used for native methods.
kJniAbi,
- // ABI of calls to a method's portable code entry point.
- kPortableAbi,
// ABI of calls to a method's quick code entry point.
kQuickAbi
};
@@ -144,8 +142,6 @@ class CompilerDriver {
return image_classes_.get();
}
- CompilerTls* GetTls();
-
// Generate the trampolines that are invoked by unresolved direct methods.
const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -153,12 +149,6 @@ class CompilerDriver {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateJniDlsymLookup() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const std::vector<uint8_t>* CreatePortableImtConflictTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickImtConflictTrampoline() const
@@ -555,8 +545,6 @@ class CompilerDriver {
void* compiler_context_;
- pthread_key_t tls_key_;
-
// Arena pool used by the compiler.
ArenaPool arena_pool_;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 5a0ec2fa57..c30cc04e05 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -129,10 +129,6 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
<< " "
<< dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i))
<< " " << dex->GetMethodName(dex->GetMethodId(i));
- EXPECT_TRUE(method->GetEntryPointFromPortableCompiledCode() != NULL) << "method_idx=" << i
- << " "
- << dex->GetMethodDeclaringClassDescriptor(dex->GetMethodId(i))
- << " " << dex->GetMethodName(dex->GetMethodId(i));
}
EXPECT_EQ(dex->NumFieldIds(), dex_cache->NumResolvedFields());
for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
@@ -148,7 +144,6 @@ TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
}
TEST_F(CompilerDriverTest, AbstractMethodErrorStub) {
- TEST_DISABLED_FOR_PORTABLE();
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
jobject class_loader;
{
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index aec7d241f5..97699e599c 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -68,9 +68,6 @@ class CompilerOptions FINAL {
implicit_so_checks_(false),
implicit_suspend_checks_(false),
compile_pic_(false),
-#ifdef ART_SEA_IR_MODE
- sea_ir_mode_(false),
-#endif
verbose_methods_(nullptr),
init_failure_output_(nullptr) {
}
@@ -89,9 +86,6 @@ class CompilerOptions FINAL {
bool implicit_so_checks,
bool implicit_suspend_checks,
bool compile_pic,
-#ifdef ART_SEA_IR_MODE
- bool sea_ir_mode,
-#endif
const std::vector<std::string>* verbose_methods,
std::ostream* init_failure_output
) : // NOLINT(whitespace/parens)
@@ -109,9 +103,6 @@ class CompilerOptions FINAL {
implicit_so_checks_(implicit_so_checks),
implicit_suspend_checks_(implicit_suspend_checks),
compile_pic_(compile_pic),
-#ifdef ART_SEA_IR_MODE
- sea_ir_mode_(sea_ir_mode),
-#endif
verbose_methods_(verbose_methods),
init_failure_output_(init_failure_output) {
}
@@ -189,12 +180,6 @@ class CompilerOptions FINAL {
return implicit_suspend_checks_;
}
-#ifdef ART_SEA_IR_MODE
- bool GetSeaIrMode() const {
- return sea_ir_mode_;
- }
-#endif
-
bool GetGenerateGDBInformation() const {
return generate_gdb_information_;
}
@@ -242,10 +227,6 @@ class CompilerOptions FINAL {
const bool implicit_suspend_checks_;
const bool compile_pic_;
-#ifdef ART_SEA_IR_MODE
- const bool sea_ir_mode_;
-#endif
-
// Vector of methods to have verbose output enabled for.
const std::vector<std::string>* const verbose_methods_;
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
deleted file mode 100644
index 7705b9cf8a..0000000000
--- a/compiler/elf_writer_mclinker.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "elf_writer_mclinker.h"
-
-#include <llvm/Support/ELF.h>
-#include <llvm/Support/TargetSelect.h>
-
-#include <mcld/Environment.h>
-#include <mcld/IRBuilder.h>
-#include <mcld/Linker.h>
-#include <mcld/LinkerConfig.h>
-#include <mcld/LinkerScript.h>
-#include <mcld/MC/ZOption.h>
-#include <mcld/Module.h>
-#include <mcld/Support/Path.h>
-#include <mcld/Support/TargetSelect.h>
-
-#include "base/unix_file/fd_file.h"
-#include "class_linker.h"
-#include "dex_method_iterator.h"
-#include "driver/compiler_driver.h"
-#include "elf_file.h"
-#include "globals.h"
-#include "mirror/art_method.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-#include "oat_writer.h"
-#include "scoped_thread_state_change.h"
-#include "vector_output_stream.h"
-
-namespace art {
-
-ElfWriterMclinker::ElfWriterMclinker(const CompilerDriver& driver, File* elf_file)
- : ElfWriter(driver, elf_file), oat_input_(nullptr) {
-}
-
-ElfWriterMclinker::~ElfWriterMclinker() {
-}
-
-bool ElfWriterMclinker::Create(File* elf_file,
- OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host,
- const CompilerDriver& driver) {
- ElfWriterMclinker elf_writer(driver, elf_file);
- return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
-}
-
-bool ElfWriterMclinker::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host) {
- std::vector<uint8_t> oat_contents;
- oat_contents.reserve(oat_writer->GetSize());
-
- Init();
- mcld::LDSection* oat_section = AddOatInput(oat_writer, &oat_contents);
- if (kUsePortableCompiler) {
- AddMethodInputs(dex_files);
- AddRuntimeInputs(android_root, is_host);
- }
-
- // link inputs
- if (!linker_->link(*module_.get(), *ir_builder_.get())) {
- LOG(ERROR) << "Failed to link " << elf_file_->GetPath();
- return false;
- }
-
- // Fill oat_contents.
- VectorOutputStream output_stream("oat contents", &oat_contents);
- oat_writer->SetOatDataOffset(oat_section->offset());
- CHECK(oat_writer->Write(&output_stream));
- CHECK_EQ(oat_writer->GetSize(), oat_contents.size());
-
- // emit linked output
- // TODO: avoid dup of fd by fixing Linker::emit to not close the argument fd.
- int fd = dup(elf_file_->Fd());
- if (fd == -1) {
- PLOG(ERROR) << "Failed to dup file descriptor for " << elf_file_->GetPath();
- return false;
- }
- if (!linker_->emit(*module_.get(), fd)) {
- LOG(ERROR) << "Failed to emit " << elf_file_->GetPath();
- return false;
- }
- mcld::Finalize();
- LOG(INFO) << "ELF file written successfully: " << elf_file_->GetPath();
-
- oat_contents.clear();
- if (kUsePortableCompiler) {
- FixupOatMethodOffsets(dex_files);
- }
- return true;
-}
-
-static void InitializeLLVM() {
- // TODO: this is lifted from art's compiler_llvm.cc, should be factored out
- if (kIsTargetBuild) {
- llvm::InitializeNativeTarget();
- // TODO: odd that there is no InitializeNativeTargetMC?
- } else {
- llvm::InitializeAllTargets();
- llvm::InitializeAllTargetMCs();
- }
-}
-
-void ElfWriterMclinker::Init() {
- std::string target_triple;
- std::string target_cpu;
- std::string target_attr;
- CompilerDriver::InstructionSetToLLVMTarget(compiler_driver_->GetInstructionSet(),
- &target_triple,
- &target_cpu,
- &target_attr);
-
- // Based on mclinker's llvm-mcld.cpp main() and LinkerTest
- //
- // TODO: LinkerTest uses mcld::Initialize(), but it does an
- // llvm::InitializeAllTargets, which we don't want. Basically we
- // want mcld::InitializeNative, but it doesn't exist yet, so we
- // inline the minimal we need here.
- InitializeLLVM();
- mcld::InitializeAllTargets();
- mcld::InitializeAllLinkers();
- mcld::InitializeAllEmulations();
- mcld::InitializeAllDiagnostics();
-
- linker_config_.reset(new mcld::LinkerConfig(target_triple));
- CHECK(linker_config_.get() != NULL);
- linker_config_->setCodeGenType(mcld::LinkerConfig::DynObj);
- linker_config_->options().setSOName(elf_file_->GetPath());
-
- // error on undefined symbols.
- // TODO: should this just be set if kIsDebugBuild?
- linker_config_->options().setNoUndefined(true);
-
- if (compiler_driver_->GetInstructionSet() == kMips) {
- // MCLinker defaults MIPS section alignment to 0x10000, not
- // 0x1000. The ABI says this is because the max page size is
- // general is 64k but that isn't true on Android.
- mcld::ZOption z_option;
- z_option.setKind(mcld::ZOption::MaxPageSize);
- z_option.setPageSize(kPageSize);
- linker_config_->options().addZOption(z_option);
- }
-
- // TODO: Wire up mcld DiagnosticEngine to LOG?
- linker_config_->options().setColor(false);
- if (false) {
- // enables some tracing of input file processing
- linker_config_->options().setTrace(true);
- }
-
- // Based on alone::Linker::config
- linker_script_.reset(new mcld::LinkerScript());
- module_.reset(new mcld::Module(linker_config_->options().soname(), *linker_script_.get()));
- CHECK(module_.get() != NULL);
- ir_builder_.reset(new mcld::IRBuilder(*module_.get(), *linker_config_.get()));
- CHECK(ir_builder_.get() != NULL);
- linker_.reset(new mcld::Linker());
- CHECK(linker_.get() != NULL);
- linker_->emulate(*linker_script_.get(), *linker_config_.get());
-}
-
-mcld::LDSection* ElfWriterMclinker::AddOatInput(OatWriter* oat_writer,
- std::vector<uint8_t>* oat_contents) {
- // NOTE: oat_contents has sufficient reserved space but it doesn't contain the data yet.
- const char* oat_data_start = reinterpret_cast<const char*>(&(*oat_contents)[0]);
- const size_t oat_data_length = oat_writer->GetOatHeader().GetExecutableOffset();
- const char* oat_code_start = oat_data_start + oat_data_length;
- const size_t oat_code_length = oat_writer->GetSize() - oat_data_length;
-
- // TODO: ownership of oat_input?
- oat_input_ = ir_builder_->CreateInput("oat contents",
- mcld::sys::fs::Path("oat contents path"),
- mcld::Input::Object);
- CHECK(oat_input_ != NULL);
-
- // TODO: ownership of null_section?
- mcld::LDSection* null_section = ir_builder_->CreateELFHeader(*oat_input_,
- "",
- mcld::LDFileFormat::Null,
- SHT_NULL,
- 0);
- CHECK(null_section != NULL);
-
- // TODO: we should split readonly data from readonly executable
- // code like .oat does. We need to control section layout with
- // linker script like functionality to guarantee references
- // between sections maintain relative position which isn't
- // possible right now with the mclinker APIs.
- CHECK(oat_code_start != NULL);
-
- // we need to ensure that oatdata is page aligned so when we
- // fixup the segment load addresses, they remain page aligned.
- uint32_t alignment = kPageSize;
-
- // TODO: ownership of text_section?
- mcld::LDSection* text_section = ir_builder_->CreateELFHeader(*oat_input_,
- ".text",
- SHT_PROGBITS,
- SHF_EXECINSTR | SHF_ALLOC,
- alignment);
- CHECK(text_section != NULL);
-
- mcld::SectionData* text_sectiondata = ir_builder_->CreateSectionData(*text_section);
- CHECK(text_sectiondata != NULL);
-
- // TODO: why does IRBuilder::CreateRegion take a non-const pointer?
- mcld::Fragment* text_fragment = ir_builder_->CreateRegion(const_cast<char*>(oat_data_start),
- oat_writer->GetSize());
- CHECK(text_fragment != NULL);
- ir_builder_->AppendFragment(*text_fragment, *text_sectiondata);
-
- ir_builder_->AddSymbol(*oat_input_,
- "oatdata",
- mcld::ResolveInfo::Object,
- mcld::ResolveInfo::Define,
- mcld::ResolveInfo::Global,
- oat_data_length, // size
- 0, // offset
- text_section);
-
- ir_builder_->AddSymbol(*oat_input_,
- "oatexec",
- mcld::ResolveInfo::Function,
- mcld::ResolveInfo::Define,
- mcld::ResolveInfo::Global,
- oat_code_length, // size
- oat_data_length, // offset
- text_section);
-
- ir_builder_->AddSymbol(*oat_input_,
- "oatlastword",
- mcld::ResolveInfo::Object,
- mcld::ResolveInfo::Define,
- mcld::ResolveInfo::Global,
- 0, // size
- // subtract a word so symbol is within section
- (oat_data_length + oat_code_length) - sizeof(uint32_t), // offset
- text_section);
-
- return text_section;
-}
-
-void ElfWriterMclinker::AddMethodInputs(const std::vector<const DexFile*>& dex_files) {
- DCHECK(oat_input_ != NULL);
-
- DexMethodIterator it(dex_files);
- while (it.HasNext()) {
- const DexFile& dex_file = it.GetDexFile();
- uint32_t method_idx = it.GetMemberIndex();
- const CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
- if (compiled_method != NULL) {
- AddCompiledCodeInput(*compiled_method);
- }
- it.Next();
- }
- added_symbols_.clear();
-}
-
-void ElfWriterMclinker::AddCompiledCodeInput(const CompiledCode& compiled_code) {
- // Check if we've seen this compiled code before. If so skip
- // it. This can happen for reused code such as invoke stubs.
- const std::string& symbol = compiled_code.GetSymbol();
- SafeMap<const std::string*, const std::string*>::iterator it = added_symbols_.find(&symbol);
- if (it != added_symbols_.end()) {
- return;
- }
- added_symbols_.Put(&symbol, &symbol);
-
- // Add input to supply code for symbol
- const std::vector<uint8_t>* code = compiled_code.GetPortableCode();
- // TODO: ownership of code_input?
- // TODO: why does IRBuilder::ReadInput take a non-const pointer?
- mcld::Input* code_input = ir_builder_->ReadInput(symbol,
- const_cast<uint8_t*>(&(*code)[0]),
- code->size());
- CHECK(code_input != NULL);
-}
-
-void ElfWriterMclinker::AddRuntimeInputs(const std::string& android_root, bool is_host) {
- std::string libart_so(android_root);
- libart_so += kIsDebugBuild ? "/lib/libartd.so" : "/lib/libart.so";
- // TODO: ownership of libart_so_input?
- mcld::Input* libart_so_input = ir_builder_->ReadInput(libart_so, libart_so);
- CHECK(libart_so_input != NULL);
-
- std::string host_prebuilt_dir("prebuilts/gcc/linux-x86/host/i686-linux-glibc2.7-4.6");
-
- std::string compiler_runtime_lib;
- if (is_host) {
- compiler_runtime_lib += host_prebuilt_dir;
- compiler_runtime_lib += "/lib/gcc/i686-linux/4.6.x-google/libgcc.a";
- } else {
- compiler_runtime_lib += android_root;
- compiler_runtime_lib += "/lib/libcompiler_rt.a";
- }
- // TODO: ownership of compiler_runtime_lib_input?
- mcld::Input* compiler_runtime_lib_input = ir_builder_->ReadInput(compiler_runtime_lib,
- compiler_runtime_lib);
- CHECK(compiler_runtime_lib_input != NULL);
-
- std::string libc_lib;
- if (is_host) {
- libc_lib += host_prebuilt_dir;
- libc_lib += "/sysroot/usr/lib/libc.so.6";
- } else {
- libc_lib += android_root;
- libc_lib += "/lib/libc.so";
- }
- // TODO: ownership of libc_lib_input?
- mcld::Input* libc_lib_input_input = ir_builder_->ReadInput(libc_lib, libc_lib);
- CHECK(libc_lib_input_input != NULL);
-
- std::string libm_lib;
- if (is_host) {
- libm_lib += host_prebuilt_dir;
- libm_lib += "/sysroot/usr/lib/libm.so";
- } else {
- libm_lib += android_root;
- libm_lib += "/lib/libm.so";
- }
- // TODO: ownership of libm_lib_input?
- mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
- CHECK(libm_lib_input_input != NULL);
-}
-
-void ElfWriterMclinker::FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files) {
- std::string error_msg;
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(elf_file_, true, false, &error_msg));
- CHECK(elf_file.get() != NULL) << elf_file_->GetPath() << ": " << error_msg;
-
- uint32_t oatdata_address = GetOatDataAddress(elf_file.get());
- DexMethodIterator it(dex_files);
- while (it.HasNext()) {
- const DexFile& dex_file = it.GetDexFile();
- uint32_t method_idx = it.GetMemberIndex();
- InvokeType invoke_type = it.GetInvokeType();
- mirror::ArtMethod* method = NULL;
- if (compiler_driver_->IsImage()) {
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- // Unchecked as we hold mutator_lock_ on entry.
- ScopedObjectAccessUnchecked soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(dex_file)));
- method = linker->ResolveMethod(dex_file, method_idx, dex_cache,
- NullHandle<mirror::ClassLoader>(),
- NullHandle<mirror::ArtMethod>(), invoke_type);
- CHECK(method != NULL);
- }
- const CompiledMethod* compiled_method =
- compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method_idx));
- if (compiled_method != NULL) {
- uint32_t offset = FixupCompiledCodeOffset(*elf_file.get(), oatdata_address, *compiled_method);
- // Don't overwrite static method trampoline
- if (method != NULL &&
- (!method->IsStatic() ||
- method->IsConstructor() ||
- method->GetDeclaringClass()->IsInitialized())) {
- method->SetPortableOatCodeOffset(offset);
- }
- }
- it.Next();
- }
- symbol_to_compiled_code_offset_.clear();
-}
-
-uint32_t ElfWriterMclinker::FixupCompiledCodeOffset(ElfFile& elf_file,
- Elf32_Addr oatdata_address,
- const CompiledCode& compiled_code) {
- const std::string& symbol = compiled_code.GetSymbol();
- SafeMap<const std::string*, uint32_t>::iterator it = symbol_to_compiled_code_offset_.find(&symbol);
- if (it != symbol_to_compiled_code_offset_.end()) {
- return it->second;
- }
-
- Elf32_Addr compiled_code_address = elf_file.FindSymbolAddress(SHT_SYMTAB,
- symbol,
- true);
- CHECK_NE(0U, compiled_code_address) << symbol;
- CHECK_LT(oatdata_address, compiled_code_address) << symbol;
- uint32_t compiled_code_offset = compiled_code_address - oatdata_address;
- symbol_to_compiled_code_offset_.Put(&symbol, compiled_code_offset);
-
- const std::vector<uint32_t>& offsets = compiled_code.GetOatdataOffsetsToCompliledCodeOffset();
- for (uint32_t i = 0; i < offsets.size(); i++) {
- uint32_t oatdata_offset = oatdata_address + offsets[i];
- uint32_t* addr = reinterpret_cast<uint32_t*>(elf_file.Begin() + oatdata_offset);
- *addr = compiled_code_offset;
- }
- return compiled_code_offset;
-}
-
-} // namespace art
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
deleted file mode 100644
index 489fefb284..0000000000
--- a/compiler/elf_writer_mclinker.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_ELF_WRITER_MCLINKER_H_
-#define ART_COMPILER_ELF_WRITER_MCLINKER_H_
-
-#include <memory>
-
-#include "elf_writer.h"
-#include "safe_map.h"
-
-namespace mcld {
-class IRBuilder;
-class Input;
-class LDSection;
-class LDSymbol;
-class Linker;
-class LinkerConfig;
-class LinkerScript;
-class Module;
-} // namespace mcld
-
-namespace art {
-
-class CompiledCode;
-
-class ElfWriterMclinker FINAL : public ElfWriter {
- public:
- // Write an ELF file. Returns true on success, false on failure.
- static bool Create(File* file,
- OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host,
- const CompilerDriver& driver)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- protected:
- bool Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host)
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- private:
- ElfWriterMclinker(const CompilerDriver& driver, File* elf_file);
- ~ElfWriterMclinker();
-
- void Init();
- mcld::LDSection* AddOatInput(OatWriter* oat_writer, std::vector<uint8_t>* oat_contents);
- void AddMethodInputs(const std::vector<const DexFile*>& dex_files);
- void AddCompiledCodeInput(const CompiledCode& compiled_code);
- void AddRuntimeInputs(const std::string& android_root, bool is_host);
- void FixupOatMethodOffsets(const std::vector<const DexFile*>& dex_files)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t FixupCompiledCodeOffset(ElfFile& elf_file,
- uint32_t oatdata_address,
- const CompiledCode& compiled_code);
-
- // Setup by Init()
- std::unique_ptr<mcld::LinkerConfig> linker_config_;
- std::unique_ptr<mcld::LinkerScript> linker_script_;
- std::unique_ptr<mcld::Module> module_;
- std::unique_ptr<mcld::IRBuilder> ir_builder_;
- std::unique_ptr<mcld::Linker> linker_;
-
- // Setup by AddOatInput()
- // TODO: ownership of oat_input_?
- mcld::Input* oat_input_;
-
- // Setup by AddCompiledCodeInput
- // set of symbols for already added mcld::Inputs
- SafeMap<const std::string*, const std::string*> added_symbols_;
-
- // Setup by FixupCompiledCodeOffset
- // map of symbol names to oatdata offset
- SafeMap<const std::string*, uint32_t> symbol_to_compiled_code_offset_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterMclinker);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_ELF_WRITER_MCLINKER_H_
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 25cf086696..d651c0fb84 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -669,6 +669,8 @@ static void FillInCFIInformation(OatWriter* oat_writer,
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
typename Elf_Phdr, typename Elf_Shdr>
+// Do not inline to avoid Clang stack frame problems. b/18738594
+NO_INLINE
static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>* builder,
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 5488e2f6d0..7fabc30b67 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -67,28 +67,6 @@ TEST_F(ElfWriterTest, dlsym) {
void* dl_oatexec = NULL;
void* dl_oatlastword = NULL;
-#if defined(ART_USE_PORTABLE_COMPILER)
- {
- // We only use dlopen for loading with portable. See OatFile::Open.
- void* dl_oat_so = dlopen(elf_filename.c_str(), RTLD_NOW);
- ASSERT_TRUE(dl_oat_so != NULL) << dlerror();
- dl_oatdata = dlsym(dl_oat_so, "oatdata");
- ASSERT_TRUE(dl_oatdata != NULL);
-
- OatHeader* dl_oat_header = reinterpret_cast<OatHeader*>(dl_oatdata);
- ASSERT_TRUE(dl_oat_header->IsValid());
- dl_oatexec = dlsym(dl_oat_so, "oatexec");
- ASSERT_TRUE(dl_oatexec != NULL);
- ASSERT_LT(dl_oatdata, dl_oatexec);
-
- dl_oatlastword = dlsym(dl_oat_so, "oatlastword");
- ASSERT_TRUE(dl_oatlastword != NULL);
- ASSERT_LT(dl_oatexec, dl_oatlastword);
-
- ASSERT_EQ(0, dlclose(dl_oat_so));
- }
-#endif
-
std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
ASSERT_TRUE(file.get() != NULL);
{
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index dac1ef4c06..cf979430ad 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -72,11 +72,6 @@ TEST_F(ImageTest, WriteRead) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
- if (kUsePortableCompiler) {
- // TODO: we disable this for portable so the test executes in a reasonable amount of time.
- // We shouldn't need to do this.
- compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
- }
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
dex_file->EnableWrite();
}
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index eb1b5db958..8c7d611d85 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -127,13 +127,6 @@ bool ImageWriter::Write(const std::string& image_filename,
jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();
- portable_imt_conflict_trampoline_offset_ =
- oat_file_->GetOatHeader().GetPortableImtConflictTrampolineOffset();
- portable_resolution_trampoline_offset_ =
- oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
- portable_to_interpreter_bridge_offset_ =
- oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
-
quick_generic_jni_trampoline_offset_ =
oat_file_->GetOatHeader().GetQuickGenericJniTrampolineOffset();
quick_imt_conflict_trampoline_offset_ =
@@ -1099,8 +1092,6 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// locations.
// Copy all of the fields from the runtime methods to the target methods first since we did a
// bytewise copy earlier.
- copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
- orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
target_ptr_size_);
copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
@@ -1110,14 +1101,10 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
- GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
} else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
orig == runtime->GetImtUnimplementedMethod())) {
- copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
- GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
} else {
@@ -1125,8 +1112,6 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) {
- copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
- GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
@@ -1137,29 +1122,6 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
const uint8_t* quick_code = GetQuickCode(orig, &quick_is_interpreted);
copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
- // Portable entrypoint:
- const uint8_t* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
- bool portable_is_interpreted = false;
- if (portable_code != nullptr &&
- (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
- // We have code for a non-static or initialized method, just use the code.
- } else if (portable_code == nullptr && orig->IsNative() &&
- (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
- // Non-static or initialized native method missing compiled code, use generic JNI version.
- // TODO: generic JNI support for LLVM.
- portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
- } else if (portable_code == nullptr && !orig->IsNative()) {
- // We don't have code at all for a non-native method, use the interpreter.
- portable_code = GetOatAddress(portable_to_interpreter_bridge_offset_);
- portable_is_interpreted = true;
- } else {
- CHECK(!orig->GetDeclaringClass()->IsInitialized());
- // We have code for a static method, but need to go through the resolution stub for class
- // initialization.
- portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
- }
- copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
- portable_code, target_ptr_size_);
// JNI entrypoint:
if (orig->IsNative()) {
// The native method's pointer is set to a stub to lookup via dlsym.
@@ -1170,7 +1132,7 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
// Interpreter entrypoint:
// Set the interpreter entrypoint depending on whether there is compiled code or not.
- uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
+ uint32_t interpreter_code = (quick_is_interpreted)
? interpreter_to_interpreter_bridge_offset_
: interpreter_to_compiled_code_bridge_offset_;
EntryPointFromInterpreter* interpreter_entrypoint =
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 4418879e73..53f5ce4545 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -48,8 +48,7 @@ class ImageWriter FINAL {
image_end_(0), image_objects_offset_begin_(0), image_roots_address_(0), oat_file_(nullptr),
oat_data_begin_(nullptr), interpreter_to_interpreter_bridge_offset_(0),
interpreter_to_compiled_code_bridge_offset_(0), jni_dlsym_lookup_offset_(0),
- portable_imt_conflict_trampoline_offset_(0), portable_resolution_trampoline_offset_(0),
- portable_to_interpreter_bridge_offset_(0), quick_generic_jni_trampoline_offset_(0),
+ quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0), quick_resolution_trampoline_offset_(0),
quick_to_interpreter_bridge_offset_(0), compile_pic_(compile_pic),
target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
@@ -178,12 +177,9 @@ class ImageWriter FINAL {
}
const uint8_t* GetOatAddress(uint32_t offset) const {
-#if !defined(ART_USE_PORTABLE_COMPILER)
// With Quick, code is within the OatFile, as there are all in one
- // .o ELF object. However with Portable, the code is always in
- // different .o ELF objects.
+ // .o ELF object.
DCHECK_LT(offset, oat_file_->Size());
-#endif
if (offset == 0u) {
return nullptr;
}
@@ -302,9 +298,6 @@ class ImageWriter FINAL {
uint32_t interpreter_to_interpreter_bridge_offset_;
uint32_t interpreter_to_compiled_code_bridge_offset_;
uint32_t jni_dlsym_lookup_offset_;
- uint32_t portable_imt_conflict_trampoline_offset_;
- uint32_t portable_resolution_trampoline_offset_;
- uint32_t portable_to_interpreter_bridge_offset_;
uint32_t quick_generic_jni_trampoline_offset_;
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 27554423ca..281e3fe109 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -80,8 +80,6 @@ class JniCompilerTest : public CommonCompilerTest {
CompileMethod(method);
ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
<< method_name << " " << method_sig;
- ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr)
- << method_name << " " << method_sig;
}
}
}
@@ -204,7 +202,6 @@ void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
}
void JniCompilerTest::CompileAndRunNoArgMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
EXPECT_EQ(0, gJava_MyClassNatives_foo_calls);
@@ -219,7 +216,6 @@ void JniCompilerTest::CompileAndRunNoArgMethodImpl() {
JNI_TEST(CompileAndRunNoArgMethod)
void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "bar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_bar
@@ -234,7 +230,6 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
JNI_TEST(CompileAndRunIntMethodThroughStub)
void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar
@@ -262,7 +257,6 @@ jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
}
void JniCompilerTest::CompileAndRunIntMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooI));
@@ -293,7 +287,6 @@ jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
}
void JniCompilerTest::CompileAndRunIntIntMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooII));
@@ -325,7 +318,6 @@ jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y)
}
void JniCompilerTest::CompileAndRunLongLongMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ));
@@ -358,7 +350,6 @@ jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdoub
}
void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooDD));
@@ -390,7 +381,6 @@ jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong
}
void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ_synchronized));
@@ -430,7 +420,6 @@ jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject
}
void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooIOO));
@@ -479,7 +468,6 @@ jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
}
void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSII));
@@ -507,7 +495,6 @@ jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble
}
void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSDD));
@@ -535,7 +522,6 @@ jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) {
}
void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "logD", "(D)D", reinterpret_cast<void*>(&Java_MyClassNatives_logD));
jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0);
@@ -549,7 +535,6 @@ jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) {
}
void JniCompilerTest::RunStaticLogFloatMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "logF", "(F)F", reinterpret_cast<void*>(&Java_MyClassNatives_logF));
jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0);
@@ -571,7 +556,6 @@ jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) {
}
void JniCompilerTest::RunStaticReturnTrueImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "returnTrue", "()Z", reinterpret_cast<void*>(&Java_MyClassNatives_returnTrue));
jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_);
@@ -581,7 +565,6 @@ void JniCompilerTest::RunStaticReturnTrueImpl() {
JNI_TEST(RunStaticReturnTrue)
void JniCompilerTest::RunStaticReturnFalseImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "returnFalse", "()Z",
reinterpret_cast<void*>(&Java_MyClassNatives_returnFalse));
@@ -592,7 +575,6 @@ void JniCompilerTest::RunStaticReturnFalseImpl() {
JNI_TEST(RunStaticReturnFalse)
void JniCompilerTest::RunGenericStaticReturnIntImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "returnInt", "()I", reinterpret_cast<void*>(&Java_MyClassNatives_returnInt));
jint result = env_->CallStaticIntMethod(jklass_, jmethod_);
@@ -626,7 +608,6 @@ jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y
void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSIOO));
@@ -684,7 +665,6 @@ jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject
}
void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSSIOO));
@@ -725,7 +705,6 @@ void Java_MyClassNatives_throwException(JNIEnv* env, jobject) {
}
void JniCompilerTest::ExceptionHandlingImpl() {
- TEST_DISABLED_FOR_PORTABLE();
{
ASSERT_FALSE(runtime_->IsStarted());
ScopedObjectAccess soa(Thread::Current());
@@ -810,7 +789,6 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
}
void JniCompilerTest::NativeStackTraceElementImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_nativeUpCall));
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10);
@@ -824,7 +802,6 @@ jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
}
void JniCompilerTest::ReturnGlobalRefImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooO));
jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_);
@@ -844,7 +821,6 @@ jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
}
void JniCompilerTest::LocalReferenceTableClearingTestImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I", reinterpret_cast<void*>(&local_ref_test));
// 1000 invocations of a method that adds 10 local references
for (int i = 0; i < 1000; i++) {
@@ -865,7 +841,6 @@ void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject
}
void JniCompilerTest::JavaLangSystemArrayCopyImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
reinterpret_cast<void*>(&my_arraycopy));
env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
@@ -883,7 +858,6 @@ jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint ex
}
void JniCompilerTest::CompareAndSwapIntImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
reinterpret_cast<void*>(&my_casi));
jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_, INT64_C(0x12345678ABCDEF88),
@@ -903,7 +877,6 @@ jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2,
}
void JniCompilerTest::GetTextImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
reinterpret_cast<void*>(&my_gettext));
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
@@ -931,7 +904,6 @@ jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstri
}
void JniCompilerTest::GetSinkPropertiesNativeImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_GetSinkProperties));
@@ -957,12 +929,10 @@ jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclas
}
void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldReturnClass));
CheckJniAbortCatcher check_jni_abort_catcher;
- // TODO: check type of returns with portable JNI compiler.
// This native method is bad, and tries to return a jstring as a jclass.
env_->CallObjectMethod(jobj_, jmethod_);
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
@@ -977,12 +947,10 @@ void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
JNI_TEST(UpcallReturnTypeChecking_Instance)
void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldReturnClass));
CheckJniAbortCatcher check_jni_abort_catcher;
- // TODO: check type of returns with portable JNI compiler.
// This native method is bad, and tries to return a jstring as a jclass.
env_->CallStaticObjectMethod(jklass_, jmethod_);
check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
@@ -1005,7 +973,6 @@ void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass
}
void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldTakeClass));
@@ -1018,7 +985,6 @@ void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() {
JNI_TEST(UpcallArgumentTypeChecking_Instance)
void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldTakeClass));
@@ -1041,7 +1007,6 @@ jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1,
}
void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkFloats", "(FF)F",
reinterpret_cast<void*>(&Java_MyClassNatives_checkFloats));
@@ -1071,7 +1036,6 @@ void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED,
}
void JniCompilerTest::CheckParameterAlignImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkParameterAlign", "(IJ)V",
reinterpret_cast<void*>(&Java_MyClassNatives_checkParameterAlign));
@@ -1486,7 +1450,6 @@ const char* longSig =
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V";
void JniCompilerTest::MaxParamNumberImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "maxParamNumber", longSig,
reinterpret_cast<void*>(&Java_MyClassNatives_maxParamNumber));
@@ -1512,7 +1475,6 @@ void JniCompilerTest::MaxParamNumberImpl() {
JNI_TEST(MaxParamNumber)
void JniCompilerTest::WithoutImplementationImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "withoutImplementation", "()V", nullptr);
env_->CallVoidMethod(jobj_, jmethod_);
@@ -1562,7 +1524,6 @@ void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, j
}
void JniCompilerTest::StackArgsIntsFirstImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsIntsFirst));
@@ -1633,7 +1594,6 @@ void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat
}
void JniCompilerTest::StackArgsFloatsFirstImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsFloatsFirst));
@@ -1703,7 +1663,6 @@ void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jin
}
void JniCompilerTest::StackArgsMixedImpl() {
- TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsMixed));
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
deleted file mode 100644
index ff37d858ad..0000000000
--- a/compiler/jni/portable/jni_compiler.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jni_compiler.h"
-
-#include "base/logging.h"
-#include "class_linker.h"
-#include "compiled_method.h"
-#include "dex_file-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "llvm/compiler_llvm.h"
-#include "llvm/ir_builder.h"
-#include "llvm/llvm_compilation_unit.h"
-#include "llvm/runtime_support_llvm_func.h"
-#include "llvm/utils_llvm.h"
-#include "mirror/art_method.h"
-#include "runtime.h"
-#include "stack.h"
-#include "thread.h"
-
-#include <llvm/ADT/SmallVector.h>
-#include <llvm/IR/BasicBlock.h>
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/Type.h>
-
-namespace art {
-namespace llvm {
-
-using ::art::llvm::runtime_support::JniMethodEnd;
-using ::art::llvm::runtime_support::JniMethodEndSynchronized;
-using ::art::llvm::runtime_support::JniMethodEndWithReference;
-using ::art::llvm::runtime_support::JniMethodEndWithReferenceSynchronized;
-using ::art::llvm::runtime_support::JniMethodStart;
-using ::art::llvm::runtime_support::JniMethodStartSynchronized;
-using ::art::llvm::runtime_support::RuntimeId;
-
-JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver* driver,
- const DexCompilationUnit* dex_compilation_unit)
- : cunit_(cunit), driver_(driver), module_(cunit_->GetModule()),
- context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
- dex_compilation_unit_(dex_compilation_unit),
- func_(NULL), elf_func_idx_(0) {
- // Check: Ensure that JNI compiler will only get "native" method
- CHECK(dex_compilation_unit->IsNative());
-}
-
-CompiledMethod* JniCompiler::Compile() {
- const bool is_static = dex_compilation_unit_->IsStatic();
- const bool is_synchronized = dex_compilation_unit_->IsSynchronized();
- const DexFile* dex_file = dex_compilation_unit_->GetDexFile();
- DexFile::MethodId const& method_id =
- dex_file->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
- char const return_shorty = dex_file->GetMethodShorty(method_id)[0];
- ::llvm::Value* this_object_or_class_object;
-
- uint32_t method_idx = dex_compilation_unit_->GetDexMethodIndex();
- std::string func_name(StringPrintf("jni_%s",
- MangleForJni(PrettyMethod(method_idx, *dex_file)).c_str()));
- CreateFunction(func_name);
-
- // Set argument name
- ::llvm::Function::arg_iterator arg_begin(func_->arg_begin());
- ::llvm::Function::arg_iterator arg_end(func_->arg_end());
- ::llvm::Function::arg_iterator arg_iter(arg_begin);
-
- DCHECK_NE(arg_iter, arg_end);
- arg_iter->setName("method");
- ::llvm::Value* method_object_addr = arg_iter++;
-
- if (!is_static) {
- // Non-static, the second argument is "this object"
- this_object_or_class_object = arg_iter++;
- } else {
- // Load class object
- this_object_or_class_object =
- irb_.LoadFromObjectOffset(method_object_addr,
- mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
- }
- // Actual argument (ignore method and this object)
- arg_begin = arg_iter;
-
- // Count the number of Object* arguments
- uint32_t handle_scope_size = 1;
- // "this" object pointer for non-static
- // "class" object pointer for static
- for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
-#if !defined(NDEBUG)
- arg_iter->setName(StringPrintf("a%u", i));
-#endif
- if (arg_iter->getType() == irb_.getJObjectTy()) {
- ++handle_scope_size;
- }
- }
-
- // Shadow stack
- ::llvm::StructType* shadow_frame_type = irb_.getShadowFrameTy(handle_scope_size);
- ::llvm::AllocaInst* shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
-
- // Store the dex pc
- irb_.StoreToObjectOffset(shadow_frame_,
- ShadowFrame::DexPCOffset(),
- irb_.getInt32(DexFile::kDexNoIndex),
- kTBAAShadowFrame);
-
- // Push the shadow frame
- ::llvm::Value* shadow_frame_upcast = irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
- ::llvm::Value* old_shadow_frame =
- irb_.Runtime().EmitPushShadowFrame(shadow_frame_upcast, method_object_addr, handle_scope_size);
-
- // Get JNIEnv
- ::llvm::Value* jni_env_object_addr =
- irb_.Runtime().EmitLoadFromThreadOffset(Thread::JniEnvOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAARuntimeInfo);
-
- // Get callee code_addr
- ::llvm::Value* code_addr =
- irb_.LoadFromObjectOffset(method_object_addr,
- mirror::ArtMethod::NativeMethodOffset().Int32Value(),
- GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(),
- is_static, true)->getPointerTo(),
- kTBAARuntimeInfo);
-
- // Load actual parameters
- std::vector< ::llvm::Value*> args;
-
- // The 1st parameter: JNIEnv*
- args.push_back(jni_env_object_addr);
-
- // Variables for GetElementPtr
- ::llvm::Value* gep_index[] = {
- irb_.getInt32(0), // No displacement for shadow frame pointer
- irb_.getInt32(1), // handle scope
- NULL,
- };
-
- size_t handle_scope_member_index = 0;
-
- // Store the "this object or class object" to handle scope
- gep_index[2] = irb_.getInt32(handle_scope_member_index++);
- ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
- irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(this_object_or_class_object, handle_scope_field_addr, kTBAAShadowFrame);
- // Push the "this object or class object" to out args
- this_object_or_class_object = irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy());
- args.push_back(this_object_or_class_object);
- // Store arguments to handle scope, and push back to args
- for (arg_iter = arg_begin; arg_iter != arg_end; ++arg_iter) {
- if (arg_iter->getType() == irb_.getJObjectTy()) {
- // Store the reference type arguments to handle scope
- gep_index[2] = irb_.getInt32(handle_scope_member_index++);
- ::llvm::Value* handle_scope_field_addr = irb_.CreateBitCast(irb_.CreateGEP(shadow_frame_, gep_index),
- irb_.getJObjectTy()->getPointerTo());
- irb_.CreateStore(arg_iter, handle_scope_field_addr, kTBAAShadowFrame);
- // Note null is placed in the handle scope but the jobject passed to the native code must be null
- // (not a pointer into the handle scope as with regular references).
- ::llvm::Value* equal_null = irb_.CreateICmpEQ(arg_iter, irb_.getJNull());
- ::llvm::Value* arg =
- irb_.CreateSelect(equal_null,
- irb_.getJNull(),
- irb_.CreateBitCast(handle_scope_field_addr, irb_.getJObjectTy()));
- args.push_back(arg);
- } else {
- args.push_back(arg_iter);
- }
- }
-
- ::llvm::Value* saved_local_ref_cookie;
- { // JniMethodStart
- RuntimeId func_id = is_synchronized ? JniMethodStartSynchronized
- : JniMethodStart;
- ::llvm::SmallVector< ::llvm::Value*, 2> args;
- if (is_synchronized) {
- args.push_back(this_object_or_class_object);
- }
- args.push_back(irb_.Runtime().EmitGetCurrentThread());
- saved_local_ref_cookie =
- irb_.CreateCall(irb_.GetRuntime(func_id), args);
- }
-
- // Call!!!
- ::llvm::Value* retval = irb_.CreateCall(code_addr, args);
-
- { // JniMethodEnd
- bool is_return_ref = return_shorty == 'L';
- RuntimeId func_id =
- is_return_ref ? (is_synchronized ? JniMethodEndWithReferenceSynchronized
- : JniMethodEndWithReference)
- : (is_synchronized ? JniMethodEndSynchronized
- : JniMethodEnd);
- ::llvm::SmallVector< ::llvm::Value*, 4> args;
- if (is_return_ref) {
- args.push_back(retval);
- }
- args.push_back(saved_local_ref_cookie);
- if (is_synchronized) {
- args.push_back(this_object_or_class_object);
- }
- args.push_back(irb_.Runtime().EmitGetCurrentThread());
-
- ::llvm::Value* decoded_jobject =
- irb_.CreateCall(irb_.GetRuntime(func_id), args);
-
- // Return decoded jobject if return reference.
- if (is_return_ref) {
- retval = decoded_jobject;
- }
- }
-
- // Pop the shadow frame
- irb_.Runtime().EmitPopShadowFrame(old_shadow_frame);
-
- // Return!
- switch (return_shorty) {
- case 'V':
- irb_.CreateRetVoid();
- break;
- case 'Z':
- case 'C':
- irb_.CreateRet(irb_.CreateZExt(retval, irb_.getInt32Ty()));
- break;
- case 'B':
- case 'S':
- irb_.CreateRet(irb_.CreateSExt(retval, irb_.getInt32Ty()));
- break;
- default:
- irb_.CreateRet(retval);
- break;
- }
-
- // Verify the generated bitcode
- VERIFY_LLVM_FUNCTION(*func_);
-
- cunit_->Materialize();
-
- return new CompiledMethod(*driver_, cunit_->GetInstructionSet(), cunit_->GetElfObject(),
- func_name);
-}
-
-
-void JniCompiler::CreateFunction(const std::string& func_name) {
- CHECK_NE(0U, func_name.size());
-
- const bool is_static = dex_compilation_unit_->IsStatic();
-
- // Get function type
- ::llvm::FunctionType* func_type =
- GetFunctionType(dex_compilation_unit_->GetDexMethodIndex(), is_static, false);
-
- // Create function
- func_ = ::llvm::Function::Create(func_type, ::llvm::Function::InternalLinkage,
- func_name, module_);
-
- // Create basic block
- ::llvm::BasicBlock* basic_block = ::llvm::BasicBlock::Create(*context_, "B0", func_);
-
- // Set insert point
- irb_.SetInsertPoint(basic_block);
-}
-
-
-::llvm::FunctionType* JniCompiler::GetFunctionType(uint32_t method_idx,
- bool is_static, bool is_native_function) {
- // Get method signature
- uint32_t shorty_size;
- const char* shorty = dex_compilation_unit_->GetShorty(&shorty_size);
- CHECK_GE(shorty_size, 1u);
-
- // Get return type
- ::llvm::Type* ret_type = NULL;
- switch (shorty[0]) {
- case 'V': ret_type = irb_.getJVoidTy(); break;
- case 'Z':
- case 'B':
- case 'C':
- case 'S':
- case 'I': ret_type = irb_.getJIntTy(); break;
- case 'F': ret_type = irb_.getJFloatTy(); break;
- case 'J': ret_type = irb_.getJLongTy(); break;
- case 'D': ret_type = irb_.getJDoubleTy(); break;
- case 'L': ret_type = irb_.getJObjectTy(); break;
- default: LOG(FATAL) << "Unreachable: unexpected return type in shorty " << shorty;
- UNREACHABLE();
- }
- // Get argument type
- std::vector< ::llvm::Type*> args_type;
-
- args_type.push_back(irb_.getJObjectTy()); // method object pointer
-
- if (!is_static || is_native_function) {
- // "this" object pointer for non-static
- // "class" object pointer for static naitve
- args_type.push_back(irb_.getJType('L'));
- }
-
- for (uint32_t i = 1; i < shorty_size; ++i) {
- args_type.push_back(irb_.getJType(shorty[i]));
- }
-
- return ::llvm::FunctionType::get(ret_type, args_type, false);
-}
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/jni/portable/jni_compiler.h b/compiler/jni/portable/jni_compiler.h
deleted file mode 100644
index ffabfe61c2..0000000000
--- a/compiler/jni/portable/jni_compiler.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
-#define ART_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
-
-#include <stdint.h>
-
-#include <string>
-
-namespace art {
- class ClassLinker;
- class CompiledMethod;
- class CompilerDriver;
- class DexFile;
- class DexCompilationUnit;
- namespace mirror {
- class ArtMethod;
- class ClassLoader;
- class DexCache;
- } // namespace mirror
-} // namespace art
-
-namespace llvm {
- class AllocaInst;
- class Function;
- class FunctionType;
- class BasicBlock;
- class LLVMContext;
- class Module;
- class Type;
- class Value;
-} // namespace llvm
-
-namespace art {
-namespace llvm {
-
-class LlvmCompilationUnit;
-class IRBuilder;
-
-class JniCompiler {
- public:
- JniCompiler(LlvmCompilationUnit* cunit,
- CompilerDriver* driver,
- const DexCompilationUnit* dex_compilation_unit);
-
- CompiledMethod* Compile();
-
- private:
- void CreateFunction(const std::string& symbol);
-
- ::llvm::FunctionType* GetFunctionType(uint32_t method_idx,
- bool is_static, bool is_target_function);
-
- private:
- LlvmCompilationUnit* cunit_;
- CompilerDriver* const driver_;
-
- ::llvm::Module* module_;
- ::llvm::LLVMContext* context_;
- IRBuilder& irb_;
-
- const DexCompilationUnit* const dex_compilation_unit_;
-
- ::llvm::Function* func_;
- uint16_t elf_func_idx_;
-};
-
-
-} // namespace llvm
-} // namespace art
-
-
-#endif // ART_COMPILER_JNI_PORTABLE_JNI_COMPILER_H_
diff --git a/compiler/llvm/art_module.ll b/compiler/llvm/art_module.ll
deleted file mode 100644
index 233692c079..0000000000
--- a/compiler/llvm/art_module.ll
+++ /dev/null
@@ -1,153 +0,0 @@
-;;
-;; Copyright (C) 2012 The Android Open Source Project
-;;
-;; Licensed under the Apache License, Version 2.0 (the "License");
-;; you may not use this file except in compliance with the License.
-;; You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Type
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-%JavaObject = type opaque
-
-%ShadowFrame = type { i32 ; Number of VRegs
- , %ShadowFrame* ; Previous frame
- , %JavaObject* ; Method object pointer
- , i32 ; Line number for stack backtrace
- ; [0 x i32] ; VRegs
- }
-
-declare void @__art_type_list(%JavaObject*, %ShadowFrame*)
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Thread
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare %JavaObject* @art_portable_get_current_thread_from_code()
-declare %JavaObject* @art_portable_set_current_thread_from_code(%JavaObject*)
-
-declare void @art_portable_lock_object_from_code(%JavaObject*, %JavaObject*)
-declare void @art_portable_unlock_object_from_code(%JavaObject*, %JavaObject*)
-
-declare void @art_portable_test_suspend_from_code(%JavaObject*)
-
-declare %ShadowFrame* @art_portable_push_shadow_frame_from_code(%JavaObject*, %ShadowFrame*, %JavaObject*, i32)
-declare void @art_portable_pop_shadow_frame_from_code(%ShadowFrame*)
-
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Exception
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare %JavaObject* @art_portable_get_and_clear_exception(%JavaObject*)
-declare void @art_portable_throw_div_zero_from_code()
-declare void @art_portable_throw_array_bounds_from_code(i32, i32)
-declare void @art_portable_throw_no_such_method_from_code(i32)
-declare void @art_portable_throw_null_pointer_exception_from_code(i32)
-declare void @art_portable_throw_stack_overflow_from_code()
-declare void @art_portable_throw_exception_from_code(%JavaObject*)
-
-declare i32 @art_portable_find_catch_block_from_code(%JavaObject*, i32)
-
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Object Space
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare %JavaObject* @art_portable_alloc_object_from_code(i32, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_alloc_object_from_code_with_access_check(i32, %JavaObject*, %JavaObject*)
-
-declare %JavaObject* @art_portable_alloc_array_from_code(i32, %JavaObject*, i32, %JavaObject*)
-declare %JavaObject* @art_portable_alloc_array_from_code_with_access_check(i32, %JavaObject*, i32, %JavaObject*)
-declare %JavaObject* @art_portable_check_and_alloc_array_from_code(i32, %JavaObject*, i32, %JavaObject*)
-declare %JavaObject* @art_portable_check_and_alloc_array_from_code_with_access_check(i32, %JavaObject*, i32, %JavaObject*)
-
-declare void @art_portable_find_instance_field_from_code(i32, %JavaObject*)
-declare void @art_portable_find_static_field_from_code(i32, %JavaObject*)
-
-declare %JavaObject* @art_portable_find_static_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_find_direct_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_find_virtual_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_find_super_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_find_interface_method_from_code_with_access_check(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_find_interface_method_from_code(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-
-declare %JavaObject* @art_portable_initialize_static_storage_from_code(i32, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_initialize_type_from_code(i32, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_initialize_type_and_verify_access_from_code(i32, %JavaObject*, %JavaObject*)
-
-declare %JavaObject* @art_portable_resolve_string_from_code(%JavaObject*, i32)
-
-declare i32 @art_portable_set32_static_from_code(i32, %JavaObject*, i32)
-declare i32 @art_portable_set64_static_from_code(i32, %JavaObject*, i64)
-declare i32 @art_portable_set_obj_static_from_code(i32, %JavaObject*, %JavaObject*)
-
-declare i32 @art_portable_get32_static_from_code(i32, %JavaObject*)
-declare i64 @art_portable_get64_static_from_code(i32, %JavaObject*)
-declare %JavaObject* @art_portable_get_obj_static_from_code(i32, %JavaObject*)
-
-declare i32 @art_portable_set32_instance_from_code(i32, %JavaObject*, %JavaObject*, i32)
-declare i32 @art_portable_set64_instance_from_code(i32, %JavaObject*, %JavaObject*, i64)
-declare i32 @art_portable_set_obj_instance_from_code(i32, %JavaObject*, %JavaObject*, %JavaObject*)
-
-declare i32 @art_portable_get32_instance_from_code(i32, %JavaObject*, %JavaObject*)
-declare i64 @art_portable_get64_instance_from_code(i32, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_get_obj_instance_from_code(i32, %JavaObject*, %JavaObject*)
-
-declare %JavaObject* @art_portable_decode_jobject_in_thread(%JavaObject*, %JavaObject*)
-
-declare void @art_portable_fill_array_data_from_code(%JavaObject*, i32, %JavaObject*, i32)
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Type Checking, in the nature of casting
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare i32 @art_portable_is_assignable_from_code(%JavaObject*, %JavaObject*)
-declare void @art_portable_check_cast_from_code(%JavaObject*, %JavaObject*)
-declare void @art_portable_check_put_array_element_from_code(%JavaObject*, %JavaObject*)
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Math
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare i64 @art_d2l(double)
-declare i32 @art_d2i(double)
-declare i64 @art_f2l(float)
-declare i32 @art_f2i(float)
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; JNI
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare i32 @art_portable_jni_method_start(%JavaObject*)
-declare i32 @art_portable_jni_method_start_synchronized(%JavaObject*, %JavaObject*)
-
-declare void @art_portable_jni_method_end(i32, %JavaObject*)
-declare void @art_portable_jni_method_end_synchronized(i32, %JavaObject*, %JavaObject*)
-declare %JavaObject* @art_portable_jni_method_end_with_reference(%JavaObject*, i32, %JavaObject*)
-declare %JavaObject* @art_portable_jni_method_end_with_reference_synchronized(%JavaObject*, i32, %JavaObject*, %JavaObject*)
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; Temporary runtime support, will be removed in the future
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-declare i1 @art_portable_is_exception_pending_from_code()
-
-declare void @art_portable_mark_gc_card_from_code(%JavaObject*, %JavaObject*)
-
-declare void @art_portable_proxy_invoke_handler_from_code(%JavaObject*, ...)
diff --git a/compiler/llvm/backend_options.h b/compiler/llvm/backend_options.h
deleted file mode 100644
index 2a08bda2f1..0000000000
--- a/compiler/llvm/backend_options.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_BACKEND_OPTIONS_H_
-#define ART_COMPILER_LLVM_BACKEND_OPTIONS_H_
-
-#include <llvm/Support/CommandLine.h>
-
-#define DECLARE_ARM_BACKEND_OPTIONS \
-extern llvm::cl::opt<bool> EnableARMLongCalls; \
-extern llvm::cl::opt<bool> ReserveR9;
-
-#define INITIAL_ARM_BACKEND_OPTIONS \
-EnableARMLongCalls = true; \
-ReserveR9 = true;
-
-#define DECLARE_X86_BACKEND_OPTIONS
-#define INITIAL_X86_BACKEND_OPTIONS
-
-#define DECLARE_Mips_BACKEND_OPTIONS
-#define INITIAL_Mips_BACKEND_OPTIONS
-
-#define LLVM_TARGET(TargetName) DECLARE_##TargetName##_BACKEND_OPTIONS
-#include "llvm/Config/Targets.def"
-
-namespace art {
-namespace llvm {
-
-inline void InitialBackendOptions() {
-#define LLVM_TARGET(TargetName) INITIAL_##TargetName##_BACKEND_OPTIONS
-#include "llvm/Config/Targets.def"
-}
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_BACKEND_OPTIONS_H_
diff --git a/compiler/llvm/backend_types.h b/compiler/llvm/backend_types.h
deleted file mode 100644
index 8ca88ddeab..0000000000
--- a/compiler/llvm/backend_types.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_BACKEND_TYPES_H_
-#define ART_COMPILER_LLVM_BACKEND_TYPES_H_
-
-#include "base/logging.h"
-
-
-namespace art {
-namespace llvm {
-
-
-enum JType {
- kVoid,
- kBoolean,
- kByte,
- kChar,
- kShort,
- kInt,
- kLong,
- kFloat,
- kDouble,
- kObject,
- MAX_JTYPE
-};
-
-enum TBAASpecialType {
- kTBAARegister,
- kTBAAStackTemp,
- kTBAAHeapArray,
- kTBAAHeapInstance,
- kTBAAHeapStatic,
- kTBAAJRuntime,
- kTBAARuntimeInfo,
- kTBAAShadowFrame,
- kTBAAConstJObject,
- MAX_TBAA_SPECIAL_TYPE
-};
-
-
-enum ExpectCond {
- kLikely,
- kUnlikely,
- MAX_EXPECT
-};
-
-
-inline JType GetJTypeFromShorty(char shorty_jty) {
- switch (shorty_jty) {
- case 'V':
- return kVoid;
-
- case 'Z':
- return kBoolean;
-
- case 'B':
- return kByte;
-
- case 'C':
- return kChar;
-
- case 'S':
- return kShort;
-
- case 'I':
- return kInt;
-
- case 'J':
- return kLong;
-
- case 'F':
- return kFloat;
-
- case 'D':
- return kDouble;
-
- case 'L':
- return kObject;
-
- default:
- LOG(FATAL) << "Unknown Dalvik shorty descriptor: " << shorty_jty;
- return kVoid;
- }
-}
-
-} // namespace llvm
-} // namespace art
-
-
-#endif // ART_COMPILER_LLVM_BACKEND_TYPES_H_
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
deleted file mode 100644
index 3aeecad78e..0000000000
--- a/compiler/llvm/compiler_llvm.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_llvm.h"
-
-#include "backend_options.h"
-#include "base/stl_util.h"
-#include "class_linker.h"
-#include "compiled_method.h"
-#include "dex/verification_results.h"
-#include "dex/verified_method.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "globals.h"
-#include "ir_builder.h"
-#include "jni/portable/jni_compiler.h"
-#include "llvm_compilation_unit.h"
-#include "thread-inl.h"
-#include "utils_llvm.h"
-#include "verifier/method_verifier.h"
-
-#include <llvm/LinkAllPasses.h>
-#include <llvm/Support/ManagedStatic.h>
-#include <llvm/Support/TargetSelect.h>
-#include <llvm/Support/Threading.h>
-
-namespace art {
-void CompileOneMethod(CompilerDriver& driver,
- Compiler* compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file,
- void* llvm_info);
-}
-
-namespace llvm {
- extern bool TimePassesIsEnabled;
-}
-
-namespace {
-
-pthread_once_t llvm_initialized = PTHREAD_ONCE_INIT;
-
-void InitializeLLVM() {
- // Initialize LLVM internal data structure for multithreading
- llvm::llvm_start_multithreaded();
-
- // NOTE: Uncomment following line to show the time consumption of LLVM passes
- // llvm::TimePassesIsEnabled = true;
-
- // Initialize LLVM target-specific options.
- art::llvm::InitialBackendOptions();
-
- // Initialize LLVM target, MC subsystem, asm printer, and asm parser.
- if (art::kIsTargetBuild) {
- // Don't initialize all targets on device. Just initialize the device's native target
- llvm::InitializeNativeTarget();
- llvm::InitializeNativeTargetAsmPrinter();
- llvm::InitializeNativeTargetAsmParser();
- } else {
- llvm::InitializeAllTargets();
- llvm::InitializeAllTargetMCs();
- llvm::InitializeAllAsmPrinters();
- llvm::InitializeAllAsmParsers();
- }
-
- // Initialize LLVM optimization passes
- llvm::PassRegistry &registry = *llvm::PassRegistry::getPassRegistry();
-
- llvm::initializeCore(registry);
- llvm::initializeScalarOpts(registry);
- llvm::initializeIPO(registry);
- llvm::initializeAnalysis(registry);
- llvm::initializeIPA(registry);
- llvm::initializeTransformUtils(registry);
- llvm::initializeInstCombine(registry);
- llvm::initializeInstrumentation(registry);
- llvm::initializeTarget(registry);
-}
-
-// The Guard to Shutdown LLVM
-// llvm::llvm_shutdown_obj llvm_guard;
-// TODO: We are commenting out this line because this will cause SEGV from
-// time to time.
-// Two reasons: (1) the order of the destruction of static objects, or
-// (2) dlopen/dlclose side-effect on static objects.
-
-} // anonymous namespace
-
-
-namespace art {
-namespace llvm {
-
-
-::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
-
-
-CompilerLLVM::CompilerLLVM(CompilerDriver* driver, InstructionSet insn_set)
- : compiler_driver_(driver), insn_set_(insn_set),
- next_cunit_id_lock_("compilation unit id lock"), next_cunit_id_(1) {
-
- // Initialize LLVM libraries
- pthread_once(&llvm_initialized, InitializeLLVM);
-}
-
-
-CompilerLLVM::~CompilerLLVM() {
-}
-
-
-LlvmCompilationUnit* CompilerLLVM::AllocateCompilationUnit() {
- MutexLock GUARD(Thread::Current(), next_cunit_id_lock_);
- LlvmCompilationUnit* cunit = new LlvmCompilationUnit(this, next_cunit_id_++);
- if (!bitcode_filename_.empty()) {
- cunit->SetBitcodeFileName(StringPrintf("%s-%u",
- bitcode_filename_.c_str(),
- cunit->GetCompilationUnitId()));
- }
- return cunit;
-}
-
-
-CompiledMethod* CompilerLLVM::
-CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_type) {
- std::unique_ptr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
-
- cunit->SetDexCompilationUnit(dex_compilation_unit);
- cunit->SetCompilerDriver(compiler_driver_);
- // TODO: consolidate ArtCompileMethods
- CompileOneMethod(compiler_driver_,
- compiler_driver_->GetCompiler(),
- dex_compilation_unit->GetCodeItem(),
- dex_compilation_unit->GetAccessFlags(),
- invoke_type,
- dex_compilation_unit->GetClassDefIndex(),
- dex_compilation_unit->GetDexMethodIndex(),
- dex_compilation_unit->GetClassLoader(),
- *dex_compilation_unit->GetDexFile(),
- cunit.get());
-
- cunit->Materialize();
-
- return new CompiledMethod(*compiler_driver_, compiler_driver_->GetInstructionSet(),
- cunit->GetElfObject(),
- dex_compilation_unit->GetVerifiedMethod()->GetDexGcMap(),
- cunit->GetDexCompilationUnit()->GetSymbol());
-}
-
-
-CompiledMethod* CompilerLLVM::
-CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
- std::unique_ptr<LlvmCompilationUnit> cunit(AllocateCompilationUnit());
-
- std::unique_ptr<JniCompiler> jni_compiler(
- new JniCompiler(cunit.get(), compiler_driver_, dex_compilation_unit));
-
- return jni_compiler->Compile();
-}
-
-
-static CompilerLLVM* ContextOf(art::CompilerDriver* driver) {
- void *compiler_context = driver->GetCompilerContext();
- CHECK(compiler_context != NULL);
- return reinterpret_cast<CompilerLLVM*>(compiler_context);
-}
-
-static CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
- void *compiler_context = driver.GetCompilerContext();
- CHECK(compiler_context != NULL);
- return reinterpret_cast<CompilerLLVM*>(compiler_context);
-}
-
-void ArtInitCompilerContext(CompilerDriver* driver) {
- CHECK(driver->GetCompilerContext() == nullptr);
-
- CompilerLLVM* compiler_llvm = new CompilerLLVM(driver, driver->GetInstructionSet());
-
- driver->SetCompilerContext(compiler_llvm);
-}
-
-void ArtUnInitCompilerContext(CompilerDriver* driver) {
- delete ContextOf(driver);
- driver->SetCompilerContext(nullptr);
-}
-
-CompiledMethod* ArtCompileMethod(CompilerDriver* driver, const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file) {
- UNUSED(class_def_idx); // TODO: this is used with Compiler::RequiresConstructorBarrier.
- ClassLinker *class_linker = Runtime::Current()->GetClassLinker();
-
- DexCompilationUnit dex_compilation_unit(nullptr, class_loader, class_linker, dex_file, code_item,
- class_def_idx, method_idx, access_flags,
- driver->GetVerifiedMethod(&dex_file, method_idx));
- CompilerLLVM* compiler_llvm = ContextOf(driver);
- CompiledMethod* result = compiler_llvm->CompileDexMethod(&dex_compilation_unit, invoke_type);
- return result;
-}
-
-CompiledMethod* ArtLLVMJniCompileMethod(CompilerDriver* driver, uint32_t access_flags,
- uint32_t method_idx, const DexFile& dex_file) {
- ClassLinker *class_linker = Runtime::Current()->GetClassLinker();
-
- DexCompilationUnit dex_compilation_unit(nullptr, nullptr, class_linker, dex_file, nullptr,
- 0, method_idx, access_flags, nullptr);
-
- CompilerLLVM* compiler_llvm = ContextOf(driver);
- CompiledMethod* result = compiler_llvm->CompileNativeMethod(&dex_compilation_unit);
- return result;
-}
-
-void compilerLLVMSetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
- ContextOf(driver)->SetBitcodeFileName(filename);
-}
-
-} // namespace llvm
-} // namespace art
-
diff --git a/compiler/llvm/compiler_llvm.h b/compiler/llvm/compiler_llvm.h
deleted file mode 100644
index 7d29198667..0000000000
--- a/compiler/llvm/compiler_llvm.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_COMPILER_LLVM_H_
-#define ART_COMPILER_LLVM_COMPILER_LLVM_H_
-
-#include <memory>
-#include <string>
-#include <utility>
-#include <vector>
-
-#include "base/macros.h"
-#include "dex_file.h"
-#include "driver/compiler_driver.h"
-#include "instruction_set.h"
-#include "mirror/object.h"
-
-namespace art {
- class CompiledMethod;
- class CompilerDriver;
- class DexCompilationUnit;
- namespace mirror {
- class ArtMethod;
- class ClassLoader;
- } // namespace mirror
-} // namespace art
-
-
-namespace llvm {
- class Function;
- class LLVMContext;
- class Module;
- class PointerType;
- class StructType;
- class Type;
-} // namespace llvm
-
-
-namespace art {
-namespace llvm {
-
-class LlvmCompilationUnit;
-class IRBuilder;
-
-class CompilerLLVM {
- public:
- CompilerLLVM(CompilerDriver* driver, InstructionSet insn_set);
-
- ~CompilerLLVM();
-
- CompilerDriver* GetCompiler() const {
- return compiler_driver_;
- }
-
- InstructionSet GetInstructionSet() const {
- return insn_set_;
- }
-
- void SetBitcodeFileName(const std::string& filename) {
- bitcode_filename_ = filename;
- }
-
- CompiledMethod* CompileDexMethod(DexCompilationUnit* dex_compilation_unit,
- InvokeType invoke_type);
-
- CompiledMethod* CompileGBCMethod(DexCompilationUnit* dex_compilation_unit, std::string* func);
-
- CompiledMethod* CompileNativeMethod(DexCompilationUnit* dex_compilation_unit);
-
- private:
- LlvmCompilationUnit* AllocateCompilationUnit();
-
- CompilerDriver* const compiler_driver_;
-
- const InstructionSet insn_set_;
-
- Mutex next_cunit_id_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- size_t next_cunit_id_ GUARDED_BY(next_cunit_id_lock_);
-
- std::string bitcode_filename_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilerLLVM);
-};
-
-void ArtInitCompilerContext(CompilerDriver* driver);
-
-void ArtUnInitCompilerContext(CompilerDriver* driver);
-
-CompiledMethod* ArtCompileMethod(CompilerDriver* driver, const DexFile::CodeItem* code_item,
- uint32_t access_flags, InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const DexFile& dex_file);
-
-CompiledMethod* ArtLLVMJniCompileMethod(CompilerDriver* driver, uint32_t access_flags,
- uint32_t method_idx, const DexFile& dex_file);
-
-void compilerLLVMSetBitcodeFileName(const CompilerDriver& driver, const std::string& filename);
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_COMPILER_LLVM_H_
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
deleted file mode 100644
index 902f8dd4f5..0000000000
--- a/compiler/llvm/gbc_expander.cc
+++ /dev/null
@@ -1,3796 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_file.h"
-#include "dex_file-inl.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "intrinsic_helper.h"
-#include "ir_builder.h"
-#include "method_reference.h"
-#include "mirror/art_method.h"
-#include "mirror/array.h"
-#include "mirror/string.h"
-#include "thread.h"
-#include "utils_llvm.h"
-#include "verifier/method_verifier.h"
-
-#include "dex/compiler_ir.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir.h"
-
-#include <llvm/ADT/STLExtras.h>
-#include <llvm/IR/Intrinsics.h>
-#include <llvm/IR/Metadata.h>
-#include <llvm/Pass.h>
-#include <llvm/Support/CFG.h>
-#include <llvm/Support/InstIterator.h>
-
-#include <vector>
-#include <map>
-#include <utility>
-
-using ::art::kMIRIgnoreNullCheck;
-using ::art::kMIRIgnoreRangeCheck;
-using ::art::llvm::IRBuilder;
-using ::art::llvm::IntrinsicHelper;
-using ::art::llvm::JType;
-using ::art::llvm::RuntimeSupportBuilder;
-using ::art::llvm::kBoolean;
-using ::art::llvm::kByte;
-using ::art::llvm::kChar;
-using ::art::llvm::kDouble;
-using ::art::llvm::kFloat;
-using ::art::llvm::kInt;
-using ::art::llvm::kLikely;
-using ::art::llvm::kLong;
-using ::art::llvm::kObject;
-using ::art::llvm::kShort;
-using ::art::llvm::kTBAAConstJObject;
-using ::art::llvm::kTBAAHeapArray;
-using ::art::llvm::kTBAAHeapInstance;
-using ::art::llvm::kTBAAHeapStatic;
-using ::art::llvm::kTBAARegister;
-using ::art::llvm::kTBAARuntimeInfo;
-using ::art::llvm::kTBAAShadowFrame;
-using ::art::llvm::kUnlikely;
-using ::art::llvm::kVoid;
-using ::art::llvm::runtime_support::AllocArray;
-using ::art::llvm::runtime_support::AllocArrayWithAccessCheck;
-using ::art::llvm::runtime_support::AllocObject;
-using ::art::llvm::runtime_support::AllocObjectWithAccessCheck;
-using ::art::llvm::runtime_support::CheckAndAllocArray;
-using ::art::llvm::runtime_support::CheckAndAllocArrayWithAccessCheck;
-using ::art::llvm::runtime_support::CheckCast;
-using ::art::llvm::runtime_support::CheckPutArrayElement;
-using ::art::llvm::runtime_support::FillArrayData;
-using ::art::llvm::runtime_support::FindCatchBlock;
-using ::art::llvm::runtime_support::FindDirectMethodWithAccessCheck;
-using ::art::llvm::runtime_support::FindInterfaceMethod;
-using ::art::llvm::runtime_support::FindInterfaceMethodWithAccessCheck;
-using ::art::llvm::runtime_support::FindStaticMethodWithAccessCheck;
-using ::art::llvm::runtime_support::FindSuperMethodWithAccessCheck;
-using ::art::llvm::runtime_support::FindVirtualMethodWithAccessCheck;
-using ::art::llvm::runtime_support::Get32Instance;
-using ::art::llvm::runtime_support::Get32Static;
-using ::art::llvm::runtime_support::Get64Instance;
-using ::art::llvm::runtime_support::Get64Static;
-using ::art::llvm::runtime_support::GetObjectInstance;
-using ::art::llvm::runtime_support::GetObjectStatic;
-using ::art::llvm::runtime_support::InitializeStaticStorage;
-using ::art::llvm::runtime_support::InitializeType;
-using ::art::llvm::runtime_support::InitializeTypeAndVerifyAccess;
-using ::art::llvm::runtime_support::IsAssignable;
-using ::art::llvm::runtime_support::ResolveString;
-using ::art::llvm::runtime_support::RuntimeId;
-using ::art::llvm::runtime_support::Set32Instance;
-using ::art::llvm::runtime_support::Set32Static;
-using ::art::llvm::runtime_support::Set64Instance;
-using ::art::llvm::runtime_support::Set64Static;
-using ::art::llvm::runtime_support::SetObjectInstance;
-using ::art::llvm::runtime_support::SetObjectStatic;
-using ::art::llvm::runtime_support::ThrowDivZeroException;
-using ::art::llvm::runtime_support::ThrowException;
-using ::art::llvm::runtime_support::ThrowIndexOutOfBounds;
-using ::art::llvm::runtime_support::ThrowNullPointerException;
-using ::art::llvm::runtime_support::ThrowStackOverflowException;
-using ::art::llvm::runtime_support::art_d2i;
-using ::art::llvm::runtime_support::art_d2l;
-using ::art::llvm::runtime_support::art_f2i;
-using ::art::llvm::runtime_support::art_f2l;
-
-namespace art {
-extern char RemapShorty(char shortyType);
-} // namespace art
-
-namespace {
-
-class GBCExpanderPass : public llvm::FunctionPass {
- private:
- const IntrinsicHelper& intrinsic_helper_;
- IRBuilder& irb_;
-
- llvm::LLVMContext& context_;
- RuntimeSupportBuilder& rtb_;
-
- private:
- llvm::AllocaInst* shadow_frame_;
- llvm::Value* old_shadow_frame_;
-
- private:
- art::CompilerDriver* const driver_;
-
- const art::DexCompilationUnit* const dex_compilation_unit_;
-
- llvm::Function* func_;
-
- std::vector<llvm::BasicBlock*> basic_blocks_;
-
- std::vector<llvm::BasicBlock*> basic_block_landing_pads_;
- llvm::BasicBlock* current_bb_;
- std::map<llvm::BasicBlock*, std::vector<std::pair<llvm::BasicBlock*, llvm::BasicBlock*>>>
- landing_pad_phi_mapping_;
- llvm::BasicBlock* basic_block_unwind_;
-
- // Maps each vreg to its shadow frame address.
- std::vector<llvm::Value*> shadow_frame_vreg_addresses_;
-
- bool changed_;
-
- private:
- //----------------------------------------------------------------------------
- // Constant for GBC expansion
- //----------------------------------------------------------------------------
- enum IntegerShiftKind {
- kIntegerSHL,
- kIntegerSHR,
- kIntegerUSHR,
- };
-
- private:
- //----------------------------------------------------------------------------
- // Helper function for GBC expansion
- //----------------------------------------------------------------------------
-
- llvm::Value* ExpandToRuntime(RuntimeId rt, llvm::CallInst& inst);
-
- uint64_t LV2UInt(llvm::Value* lv) {
- return llvm::cast<llvm::ConstantInt>(lv)->getZExtValue();
- }
-
- int64_t LV2SInt(llvm::Value* lv) {
- return llvm::cast<llvm::ConstantInt>(lv)->getSExtValue();
- }
-
- private:
- // TODO: Almost all Emit* are directly copy-n-paste from MethodCompiler.
- // Refactor these utility functions from MethodCompiler to avoid forking.
-
- void EmitStackOverflowCheck(llvm::Instruction* first_non_alloca);
-
- void RewriteFunction();
-
- void RewriteBasicBlock(llvm::BasicBlock* original_block);
-
- void UpdatePhiInstruction(llvm::BasicBlock* old_basic_block,
- llvm::BasicBlock* new_basic_block);
-
-
- // Sign or zero extend category 1 types < 32bits in size to 32bits.
- llvm::Value* SignOrZeroExtendCat1Types(llvm::Value* value, JType jty);
-
- // Truncate category 1 types from 32bits to the given JType size.
- llvm::Value* TruncateCat1Types(llvm::Value* value, JType jty);
-
- //----------------------------------------------------------------------------
- // Dex cache code generation helper function
- //----------------------------------------------------------------------------
- llvm::Value* EmitLoadDexCacheAddr(art::MemberOffset dex_cache_offset);
-
- llvm::Value* EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx);
-
- llvm::Value* EmitLoadDexCacheResolvedMethodFieldAddr(uint32_t method_idx);
-
- llvm::Value* EmitLoadDexCacheStringFieldAddr(uint32_t string_idx);
-
- //----------------------------------------------------------------------------
- // Code generation helper function
- //----------------------------------------------------------------------------
- llvm::Value* EmitLoadMethodObjectAddr();
-
- llvm::Value* EmitLoadArrayLength(llvm::Value* array);
-
- llvm::Value* EmitLoadSDCalleeMethodObjectAddr(uint32_t callee_method_idx);
-
- llvm::Value* EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx,
- llvm::Value* this_addr);
-
- llvm::Value* EmitArrayGEP(llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty);
-
- //----------------------------------------------------------------------------
- // Invoke helper function
- //----------------------------------------------------------------------------
- llvm::Value* EmitInvoke(llvm::CallInst& call_inst);
-
- //----------------------------------------------------------------------------
- // Inlining helper functions
- //----------------------------------------------------------------------------
- bool EmitIntrinsic(llvm::CallInst& call_inst, llvm::Value** result);
-
- bool EmitIntrinsicStringLengthOrIsEmpty(llvm::CallInst& call_inst,
- llvm::Value** result, bool is_empty);
-
- private:
- //----------------------------------------------------------------------------
- // Expand Greenland intrinsics
- //----------------------------------------------------------------------------
- void Expand_TestSuspend(llvm::CallInst& call_inst);
-
- void Expand_MarkGCCard(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_LoadStringFromDexCache(llvm::Value* string_idx_value);
-
- llvm::Value* Expand_LoadTypeFromDexCache(llvm::Value* type_idx_value);
-
- void Expand_LockObject(llvm::Value* obj);
-
- void Expand_UnlockObject(llvm::Value* obj);
-
- llvm::Value* Expand_ArrayGet(llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty);
-
- void Expand_ArrayPut(llvm::Value* new_value,
- llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty);
-
- void Expand_FilledNewArray(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_IGetFast(llvm::Value* field_offset_value,
- llvm::Value* is_volatile_value,
- llvm::Value* object_addr,
- JType field_jty);
-
- void Expand_IPutFast(llvm::Value* field_offset_value,
- llvm::Value* is_volatile_value,
- llvm::Value* object_addr,
- llvm::Value* new_value,
- JType field_jty);
-
- llvm::Value* Expand_SGetFast(llvm::Value* static_storage_addr,
- llvm::Value* field_offset_value,
- llvm::Value* is_volatile_value,
- JType field_jty);
-
- void Expand_SPutFast(llvm::Value* static_storage_addr,
- llvm::Value* field_offset_value,
- llvm::Value* is_volatile_value,
- llvm::Value* new_value,
- JType field_jty);
-
- llvm::Value* Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr);
-
- llvm::Value*
- Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value);
-
- llvm::Value*
- Expand_GetVirtualCalleeMethodObjAddrFast(llvm::Value* vtable_idx_value,
- llvm::Value* this_addr);
-
- llvm::Value* Expand_Invoke(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_DivRem(llvm::CallInst& call_inst, bool is_div, JType op_jty);
-
- void Expand_AllocaShadowFrame(llvm::Value* num_vregs_value);
-
- void Expand_SetVReg(llvm::Value* entry_idx, llvm::Value* obj);
-
- void Expand_PopShadowFrame();
-
- void Expand_UpdateDexPC(llvm::Value* dex_pc_value);
-
- //----------------------------------------------------------------------------
- // Quick
- //----------------------------------------------------------------------------
-
- llvm::Value* Expand_FPCompare(llvm::Value* src1_value,
- llvm::Value* src2_value,
- bool gt_bias);
-
- llvm::Value* Expand_LongCompare(llvm::Value* src1_value, llvm::Value* src2_value);
-
- llvm::Value* EmitCompareResultSelection(llvm::Value* cmp_eq,
- llvm::Value* cmp_lt);
-
- llvm::Value* EmitLoadConstantClass(uint32_t dex_pc, uint32_t type_idx);
- llvm::Value* EmitLoadStaticStorage(uint32_t dex_pc, uint32_t type_idx);
-
- llvm::Value* Expand_HLIGet(llvm::CallInst& call_inst, JType field_jty);
- void Expand_HLIPut(llvm::CallInst& call_inst, JType field_jty);
-
- llvm::Value* Expand_HLSget(llvm::CallInst& call_inst, JType field_jty);
- void Expand_HLSput(llvm::CallInst& call_inst, JType field_jty);
-
- llvm::Value* Expand_HLArrayGet(llvm::CallInst& call_inst, JType field_jty);
- void Expand_HLArrayPut(llvm::CallInst& call_inst, JType field_jty);
-
- llvm::Value* Expand_ConstString(llvm::CallInst& call_inst);
- llvm::Value* Expand_ConstClass(llvm::CallInst& call_inst);
-
- void Expand_MonitorEnter(llvm::CallInst& call_inst);
- void Expand_MonitorExit(llvm::CallInst& call_inst);
-
- void Expand_HLCheckCast(llvm::CallInst& call_inst);
- llvm::Value* Expand_InstanceOf(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_NewInstance(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_HLInvoke(llvm::CallInst& call_inst);
-
- llvm::Value* Expand_OptArrayLength(llvm::CallInst& call_inst);
- llvm::Value* Expand_NewArray(llvm::CallInst& call_inst);
- llvm::Value* Expand_HLFilledNewArray(llvm::CallInst& call_inst);
- void Expand_HLFillArrayData(llvm::CallInst& call_inst);
-
- llvm::Value* EmitAllocNewArray(uint32_t dex_pc,
- llvm::Value* array_length_value,
- uint32_t type_idx,
- bool is_filled_new_array);
-
- llvm::Value* EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx,
- art::InvokeType invoke_type,
- llvm::Value* this_addr,
- uint32_t dex_pc,
- bool is_fast_path);
-
- void EmitMarkGCCard(llvm::Value* value, llvm::Value* target_addr);
-
- void EmitUpdateDexPC(uint32_t dex_pc);
-
- void EmitGuard_DivZeroException(uint32_t dex_pc,
- llvm::Value* denominator,
- JType op_jty);
-
- void EmitGuard_NullPointerException(uint32_t dex_pc, llvm::Value* object,
- int opt_flags);
-
- void EmitGuard_ArrayIndexOutOfBoundsException(uint32_t dex_pc,
- llvm::Value* array,
- llvm::Value* index,
- int opt_flags);
-
- llvm::FunctionType* GetFunctionType(llvm::Type* ret_type, uint32_t method_idx, bool is_static);
-
- llvm::BasicBlock* GetBasicBlock(uint32_t dex_pc);
-
- llvm::BasicBlock* CreateBasicBlockWithDexPC(uint32_t dex_pc,
- const char* postfix);
-
- int32_t GetTryItemOffset(uint32_t dex_pc);
-
- llvm::BasicBlock* GetLandingPadBasicBlock(uint32_t dex_pc);
-
- llvm::BasicBlock* GetUnwindBasicBlock();
-
- void EmitGuard_ExceptionLandingPad(uint32_t dex_pc);
-
- void EmitBranchExceptionLandingPad(uint32_t dex_pc);
-
- //----------------------------------------------------------------------------
- // Expand Arithmetic Helper Intrinsics
- //----------------------------------------------------------------------------
-
- llvm::Value* Expand_IntegerShift(llvm::Value* src1_value,
- llvm::Value* src2_value,
- IntegerShiftKind kind,
- JType op_jty);
-
- public:
- static char ID;
-
- GBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
- art::CompilerDriver* driver, const art::DexCompilationUnit* dex_compilation_unit)
- : llvm::FunctionPass(ID), intrinsic_helper_(intrinsic_helper), irb_(irb),
- context_(irb.getContext()), rtb_(irb.Runtime()),
- shadow_frame_(NULL), old_shadow_frame_(NULL),
- driver_(driver),
- dex_compilation_unit_(dex_compilation_unit),
- func_(NULL), current_bb_(NULL), basic_block_unwind_(NULL), changed_(false) {}
-
- bool runOnFunction(llvm::Function& func);
-
- private:
- void InsertStackOverflowCheck(llvm::Function& func);
-
- llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
- llvm::CallInst& call_inst);
-};
-
-char GBCExpanderPass::ID = 0;
-
-bool GBCExpanderPass::runOnFunction(llvm::Function& func) {
- VLOG(compiler) << "GBC expansion on " << func.getName().str();
-
- // Runtime support or stub
- if (dex_compilation_unit_ == NULL) {
- return false;
- }
-
- // Setup rewrite context
- shadow_frame_ = NULL;
- old_shadow_frame_ = NULL;
- func_ = &func;
- changed_ = false; // Assume unchanged
-
- shadow_frame_vreg_addresses_.resize(dex_compilation_unit_->GetCodeItem()->registers_size_, NULL);
- basic_blocks_.resize(dex_compilation_unit_->GetCodeItem()->insns_size_in_code_units_);
- basic_block_landing_pads_.resize(dex_compilation_unit_->GetCodeItem()->tries_size_, NULL);
- basic_block_unwind_ = NULL;
- for (llvm::Function::iterator bb_iter = func_->begin(), bb_end = func_->end();
- bb_iter != bb_end;
- ++bb_iter) {
- if (bb_iter->begin()->getMetadata("DexOff") == NULL) {
- continue;
- }
- uint32_t dex_pc = LV2UInt(bb_iter->begin()->getMetadata("DexOff")->getOperand(0));
- basic_blocks_[dex_pc] = bb_iter;
- }
-
- // Insert stack overflow check
- InsertStackOverflowCheck(func); // TODO: Use intrinsic.
-
- // Rewrite the intrinsics
- RewriteFunction();
-
- VERIFY_LLVM_FUNCTION(func);
-
- return changed_;
-}
-
-void GBCExpanderPass::RewriteBasicBlock(llvm::BasicBlock* original_block) {
- llvm::BasicBlock* curr_basic_block = original_block;
-
- llvm::BasicBlock::iterator inst_iter = original_block->begin();
- llvm::BasicBlock::iterator inst_end = original_block->end();
-
- while (inst_iter != inst_end) {
- llvm::CallInst* call_inst = llvm::dyn_cast<llvm::CallInst>(inst_iter);
- IntrinsicHelper::IntrinsicId intr_id = IntrinsicHelper::UnknownId;
-
- if (call_inst) {
- llvm::Function* callee_func = call_inst->getCalledFunction();
- intr_id = intrinsic_helper_.GetIntrinsicId(callee_func);
- }
-
- if (intr_id == IntrinsicHelper::UnknownId) {
- // This is not intrinsic call. Skip this instruction.
- ++inst_iter;
- continue;
- }
-
- // Rewrite the intrinsic and change the function
- changed_ = true;
- irb_.SetInsertPoint(inst_iter);
-
- // Expand the intrinsic
- if (llvm::Value* new_value = ExpandIntrinsic(intr_id, *call_inst)) {
- inst_iter->replaceAllUsesWith(new_value);
- }
-
- // Remove the old intrinsic call instruction
- llvm::BasicBlock::iterator old_inst = inst_iter++;
- old_inst->eraseFromParent();
-
- // Splice the instruction to the new basic block
- llvm::BasicBlock* next_basic_block = irb_.GetInsertBlock();
- if (next_basic_block != curr_basic_block) {
- next_basic_block->getInstList().splice(
- irb_.GetInsertPoint(), curr_basic_block->getInstList(),
- inst_iter, inst_end);
- curr_basic_block = next_basic_block;
- inst_end = curr_basic_block->end();
- }
- }
-}
-
-
-void GBCExpanderPass::RewriteFunction() {
- size_t num_basic_blocks = func_->getBasicBlockList().size();
- // NOTE: We are not using (bb_iter != bb_end) as the for-loop condition,
- // because we will create new basic block while expanding the intrinsics.
- // We only want to iterate through the input basic blocks.
-
- landing_pad_phi_mapping_.clear();
-
- for (llvm::Function::iterator bb_iter = func_->begin();
- num_basic_blocks > 0; ++bb_iter, --num_basic_blocks) {
- // Set insert point to current basic block.
- irb_.SetInsertPoint(bb_iter);
-
- current_bb_ = bb_iter;
-
- // Rewrite the basic block
- RewriteBasicBlock(bb_iter);
-
- // Update the phi-instructions in the successor basic block
- llvm::BasicBlock* last_block = irb_.GetInsertBlock();
- if (last_block != bb_iter) {
- UpdatePhiInstruction(bb_iter, last_block);
- }
- }
-
- typedef std::map<llvm::PHINode*, llvm::PHINode*> HandlerPHIMap;
- HandlerPHIMap handler_phi;
- // Iterate every used landing pad basic block
- for (size_t i = 0, ei = basic_block_landing_pads_.size(); i != ei; ++i) {
- llvm::BasicBlock* lbb = basic_block_landing_pads_[i];
- if (lbb == NULL) {
- continue;
- }
-
- llvm::TerminatorInst* term_inst = lbb->getTerminator();
- std::vector<std::pair<llvm::BasicBlock*, llvm::BasicBlock*>>& rewrite_pair
- = landing_pad_phi_mapping_[lbb];
- irb_.SetInsertPoint(lbb->begin());
-
- // Iterate every succeeding basic block (catch block)
- for (unsigned succ_iter = 0, succ_end = term_inst->getNumSuccessors();
- succ_iter != succ_end; ++succ_iter) {
- llvm::BasicBlock* succ_basic_block = term_inst->getSuccessor(succ_iter);
-
- // Iterate every phi instructions in the succeeding basic block
- for (llvm::BasicBlock::iterator
- inst_iter = succ_basic_block->begin(),
- inst_end = succ_basic_block->end();
- inst_iter != inst_end; ++inst_iter) {
- llvm::PHINode *phi = llvm::dyn_cast<llvm::PHINode>(inst_iter);
-
- if (!phi) {
- break; // Meet non-phi instruction. Done.
- }
-
- if (handler_phi[phi] == NULL) {
- handler_phi[phi] = llvm::PHINode::Create(phi->getType(), 1);
- }
-
- // Create new_phi in landing pad
- llvm::PHINode* new_phi = irb_.CreatePHI(phi->getType(), rewrite_pair.size());
- // Insert all incoming value into new_phi by rewrite_pair
- for (size_t j = 0, ej = rewrite_pair.size(); j != ej; ++j) {
- llvm::BasicBlock* old_bb = rewrite_pair[j].first;
- llvm::BasicBlock* new_bb = rewrite_pair[j].second;
- new_phi->addIncoming(phi->getIncomingValueForBlock(old_bb), new_bb);
- }
- // Delete all incoming value from phi by rewrite_pair
- for (size_t j = 0, ej = rewrite_pair.size(); j != ej; ++j) {
- llvm::BasicBlock* old_bb = rewrite_pair[j].first;
- int old_bb_idx = phi->getBasicBlockIndex(old_bb);
- if (old_bb_idx >= 0) {
- phi->removeIncomingValue(old_bb_idx, false);
- }
- }
- // Insert new_phi into new handler phi
- handler_phi[phi]->addIncoming(new_phi, lbb);
- }
- }
- }
-
- // Replace all handler phi
- // We can't just use the old handler phi, because some exception edges will disappear after we
- // compute fast-path.
- for (HandlerPHIMap::iterator it = handler_phi.begin(); it != handler_phi.end(); ++it) {
- llvm::PHINode* old_phi = it->first;
- llvm::PHINode* new_phi = it->second;
- new_phi->insertBefore(old_phi);
- old_phi->replaceAllUsesWith(new_phi);
- old_phi->eraseFromParent();
- }
-}
-
-void GBCExpanderPass::UpdatePhiInstruction(llvm::BasicBlock* old_basic_block,
- llvm::BasicBlock* new_basic_block) {
- llvm::TerminatorInst* term_inst = new_basic_block->getTerminator();
-
- if (!term_inst) {
- return; // No terminating instruction in new_basic_block. Nothing to do.
- }
-
- // Iterate every succeeding basic block
- for (unsigned succ_iter = 0, succ_end = term_inst->getNumSuccessors();
- succ_iter != succ_end; ++succ_iter) {
- llvm::BasicBlock* succ_basic_block = term_inst->getSuccessor(succ_iter);
-
- // Iterate every phi instructions in the succeeding basic block
- for (llvm::BasicBlock::iterator
- inst_iter = succ_basic_block->begin(),
- inst_end = succ_basic_block->end();
- inst_iter != inst_end; ++inst_iter) {
- llvm::PHINode *phi = llvm::dyn_cast<llvm::PHINode>(inst_iter);
-
- if (!phi) {
- break; // Meet non-phi instruction. Done.
- }
-
- // Update the incoming block of this phi instruction
- for (llvm::PHINode::block_iterator
- ibb_iter = phi->block_begin(), ibb_end = phi->block_end();
- ibb_iter != ibb_end; ++ibb_iter) {
- if (*ibb_iter == old_basic_block) {
- *ibb_iter = new_basic_block;
- }
- }
- }
- }
-}
-
-llvm::Value* GBCExpanderPass::ExpandToRuntime(RuntimeId rt, llvm::CallInst& inst) {
- // Some GBC intrinsic can directly replace with IBC runtime. "Directly" means
- // the arguments passed to the GBC intrinsic are as the same as IBC runtime
- // function, therefore only called function is needed to change.
- unsigned num_args = inst.getNumArgOperands();
-
- if (num_args <= 0) {
- return irb_.CreateCall(irb_.GetRuntime(rt));
- } else {
- std::vector<llvm::Value*> args;
- for (unsigned i = 0; i < num_args; i++) {
- args.push_back(inst.getArgOperand(i));
- }
-
- return irb_.CreateCall(irb_.GetRuntime(rt), args);
- }
-}
-
-void
-GBCExpanderPass::EmitStackOverflowCheck(llvm::Instruction* first_non_alloca) {
- llvm::Function* func = first_non_alloca->getParent()->getParent();
- llvm::Module* module = func->getParent();
-
- // Call llvm intrinsic function to get frame address.
- llvm::Function* frameaddress =
- llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::frameaddress);
-
- // The type of llvm::frameaddress is: i8* @llvm.frameaddress(i32)
- llvm::Value* frame_address = irb_.CreateCall(frameaddress, irb_.getInt32(0));
-
- // Cast i8* to int
- frame_address = irb_.CreatePtrToInt(frame_address, irb_.getPtrEquivIntTy());
-
- // Get thread.stack_end_
- llvm::Value* stack_end =
- irb_.Runtime().EmitLoadFromThreadOffset(art::Thread::StackEndOffset().Int32Value(),
- irb_.getPtrEquivIntTy(),
- kTBAARuntimeInfo);
-
- // Check the frame address < thread.stack_end_ ?
- llvm::Value* is_stack_overflow = irb_.CreateICmpULT(frame_address, stack_end);
-
- llvm::BasicBlock* block_exception =
- llvm::BasicBlock::Create(context_, "stack_overflow", func);
-
- llvm::BasicBlock* block_continue =
- llvm::BasicBlock::Create(context_, "stack_overflow_cont", func);
-
- irb_.CreateCondBr(is_stack_overflow, block_exception, block_continue, kUnlikely);
-
- // If stack overflow, throw exception.
- irb_.SetInsertPoint(block_exception);
- irb_.CreateCall(irb_.GetRuntime(ThrowStackOverflowException));
-
- // Unwind.
- llvm::Type* ret_type = func->getReturnType();
- if (ret_type->isVoidTy()) {
- irb_.CreateRetVoid();
- } else {
- // The return value is ignored when there's an exception. MethodCompiler
- // returns zero value under the the corresponding return type in this case.
- // GBCExpander returns LLVM undef value here for brevity
- irb_.CreateRet(llvm::UndefValue::get(ret_type));
- }
-
- irb_.SetInsertPoint(block_continue);
-}
-
-llvm::Value* GBCExpanderPass::EmitLoadDexCacheAddr(art::MemberOffset offset) {
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- return irb_.LoadFromObjectOffset(method_object_addr,
- offset.Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
-}
-
-llvm::Value*
-GBCExpanderPass::EmitLoadDexCacheResolvedTypeFieldAddr(uint32_t type_idx) {
- llvm::Value* resolved_type_dex_cache_addr =
- EmitLoadDexCacheAddr(art::mirror::ArtMethod::DexCacheResolvedTypesOffset());
-
- llvm::Value* type_idx_value = irb_.getPtrEquivInt(type_idx);
-
- return EmitArrayGEP(resolved_type_dex_cache_addr, type_idx_value, kObject);
-}
-
-llvm::Value* GBCExpanderPass::
-EmitLoadDexCacheResolvedMethodFieldAddr(uint32_t method_idx) {
- llvm::Value* resolved_method_dex_cache_addr =
- EmitLoadDexCacheAddr(art::mirror::ArtMethod::DexCacheResolvedMethodsOffset());
-
- llvm::Value* method_idx_value = irb_.getPtrEquivInt(method_idx);
-
- return EmitArrayGEP(resolved_method_dex_cache_addr, method_idx_value, kObject);
-}
-
-llvm::Value* GBCExpanderPass::
-EmitLoadDexCacheStringFieldAddr(uint32_t string_idx) {
- llvm::Value* string_dex_cache_addr =
- EmitLoadDexCacheAddr(art::mirror::ArtMethod::DexCacheStringsOffset());
-
- llvm::Value* string_idx_value = irb_.getPtrEquivInt(string_idx);
-
- return EmitArrayGEP(string_dex_cache_addr, string_idx_value, kObject);
-}
-
-llvm::Value* GBCExpanderPass::EmitLoadMethodObjectAddr() {
- llvm::Function* parent_func = irb_.GetInsertBlock()->getParent();
- return parent_func->arg_begin();
-}
-
-llvm::Value* GBCExpanderPass::EmitLoadArrayLength(llvm::Value* array) {
- // Load array length
- return irb_.LoadFromObjectOffset(array,
- art::mirror::Array::LengthOffset().Int32Value(),
- irb_.getJIntTy(),
- kTBAAConstJObject);
-}
-
-llvm::Value*
-GBCExpanderPass::EmitLoadSDCalleeMethodObjectAddr(uint32_t callee_method_idx) {
- llvm::Value* callee_method_object_field_addr =
- EmitLoadDexCacheResolvedMethodFieldAddr(callee_method_idx);
-
- return irb_.CreateLoad(callee_method_object_field_addr, kTBAARuntimeInfo);
-}
-
-llvm::Value* GBCExpanderPass::
-EmitLoadVirtualCalleeMethodObjectAddr(int vtable_idx, llvm::Value* this_addr) {
- // Load class object of *this* pointer
- llvm::Value* class_object_addr =
- irb_.LoadFromObjectOffset(this_addr,
- art::mirror::Object::ClassOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
-
- // Load vtable address
- llvm::Value* vtable_addr =
- irb_.LoadFromObjectOffset(class_object_addr,
- art::mirror::Class::VTableOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
-
- // Load callee method object
- llvm::Value* vtable_idx_value =
- irb_.getPtrEquivInt(static_cast<uint64_t>(vtable_idx));
-
- llvm::Value* method_field_addr =
- EmitArrayGEP(vtable_addr, vtable_idx_value, kObject);
-
- return irb_.CreateLoad(method_field_addr, kTBAAConstJObject);
-}
-
-// Emit Array GetElementPtr
-llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty) {
- int data_offset;
- if (elem_jty == kLong || elem_jty == kDouble ||
- (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) {
- data_offset = art::mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = art::mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- llvm::Constant* data_offset_value =
- irb_.getPtrEquivInt(data_offset);
-
- llvm::Type* elem_type = irb_.getJType(elem_jty);
-
- llvm::Value* array_data_addr =
- irb_.CreatePtrDisp(array_addr, data_offset_value,
- elem_type->getPointerTo());
-
- return irb_.CreateGEP(array_data_addr, index_value);
-}
-
-llvm::Value* GBCExpanderPass::EmitInvoke(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- art::InvokeType invoke_type =
- static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
- bool is_static = (invoke_type == art::kStatic);
- art::MethodReference target_method(dex_compilation_unit_->GetDexFile(),
- LV2UInt(call_inst.getArgOperand(1)));
-
- // Load *this* actual parameter
- llvm::Value* this_addr = (!is_static) ? call_inst.getArgOperand(3) : NULL;
-
- // Compute invoke related information for compiler decision
- int vtable_idx = -1;
- uintptr_t direct_code = 0;
- uintptr_t direct_method = 0;
- bool is_fast_path = driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc,
- true, true,
- &invoke_type, &target_method,
- &vtable_idx,
- &direct_code, &direct_method);
- // Load the method object
- llvm::Value* callee_method_object_addr = NULL;
-
- if (!is_fast_path) {
- callee_method_object_addr =
- EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index, invoke_type,
- this_addr, dex_pc, is_fast_path);
- } else {
- switch (invoke_type) {
- case art::kStatic:
- case art::kDirect:
- if (direct_method != 0u &&
- direct_method != static_cast<uintptr_t>(-1)) {
- callee_method_object_addr =
- irb_.CreateIntToPtr(irb_.getPtrEquivInt(direct_method),
- irb_.getJObjectTy());
- } else {
- callee_method_object_addr =
- EmitLoadSDCalleeMethodObjectAddr(target_method.dex_method_index);
- }
- break;
-
- case art::kVirtual:
- DCHECK_NE(vtable_idx, -1);
- callee_method_object_addr =
- EmitLoadVirtualCalleeMethodObjectAddr(vtable_idx, this_addr);
- break;
-
- case art::kSuper:
- LOG(FATAL) << "invoke-super should be promoted to invoke-direct in "
- "the fast path.";
- break;
-
- case art::kInterface:
- callee_method_object_addr =
- EmitCallRuntimeForCalleeMethodObjectAddr(target_method.dex_method_index,
- invoke_type, this_addr,
- dex_pc, is_fast_path);
- break;
- }
- }
-
- // Load the actual parameter
- std::vector<llvm::Value*> args;
-
- args.push_back(callee_method_object_addr); // method object for callee
-
- for (uint32_t i = 3; i < call_inst.getNumArgOperands(); ++i) {
- args.push_back(call_inst.getArgOperand(i));
- }
-
- llvm::Value* code_addr;
- llvm::Type* func_type = GetFunctionType(call_inst.getType(),
- target_method.dex_method_index, is_static);
- if (direct_code != 0u && direct_code != static_cast<uintptr_t>(-1)) {
- code_addr =
- irb_.CreateIntToPtr(irb_.getPtrEquivInt(direct_code),
- func_type->getPointerTo());
- } else {
- code_addr =
- irb_.LoadFromObjectOffset(callee_method_object_addr,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value(),
- func_type->getPointerTo(), kTBAARuntimeInfo);
- }
-
- // Invoke callee
- EmitUpdateDexPC(dex_pc);
- llvm::Value* retval = irb_.CreateCall(code_addr, args);
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return retval;
-}
-
-bool GBCExpanderPass::EmitIntrinsic(llvm::CallInst& call_inst,
- llvm::Value** result) {
- DCHECK(result != NULL);
-
- uint32_t callee_method_idx = LV2UInt(call_inst.getArgOperand(1));
- std::string callee_method_name(
- PrettyMethod(callee_method_idx, *dex_compilation_unit_->GetDexFile()));
-
- if (callee_method_name == "int java.lang.String.length()") {
- return EmitIntrinsicStringLengthOrIsEmpty(call_inst, result,
- false /* is_empty */);
- }
- if (callee_method_name == "boolean java.lang.String.isEmpty()") {
- return EmitIntrinsicStringLengthOrIsEmpty(call_inst, result,
- true /* is_empty */);
- }
-
- *result = NULL;
- return false;
-}
-
-bool GBCExpanderPass::EmitIntrinsicStringLengthOrIsEmpty(llvm::CallInst& call_inst,
- llvm::Value** result,
- bool is_empty) {
- art::InvokeType invoke_type =
- static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
- DCHECK_NE(invoke_type, art::kStatic);
- DCHECK_EQ(call_inst.getNumArgOperands(), 4U);
-
- llvm::Value* this_object = call_inst.getArgOperand(3);
- llvm::Value* string_count =
- irb_.LoadFromObjectOffset(this_object,
- art::mirror::String::CountOffset().Int32Value(),
- irb_.getJIntTy(),
- kTBAAConstJObject);
- if (is_empty) {
- llvm::Value* count_equals_zero = irb_.CreateICmpEQ(string_count,
- irb_.getJInt(0));
- llvm::Value* is_empty = irb_.CreateSelect(count_equals_zero,
- irb_.getJBoolean(true),
- irb_.getJBoolean(false));
- is_empty = SignOrZeroExtendCat1Types(is_empty, kBoolean);
- *result = is_empty;
- } else {
- *result = string_count;
- }
- return true;
-}
-
-void GBCExpanderPass::Expand_TestSuspend(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
-
- llvm::Value* suspend_count =
- irb_.Runtime().EmitLoadFromThreadOffset(art::Thread::ThreadFlagsOffset().Int32Value(),
- irb_.getInt16Ty(),
- kTBAARuntimeInfo);
- llvm::Value* is_suspend = irb_.CreateICmpNE(suspend_count, irb_.getInt16(0));
-
- llvm::BasicBlock* basic_block_suspend = CreateBasicBlockWithDexPC(dex_pc, "suspend");
- llvm::BasicBlock* basic_block_cont = CreateBasicBlockWithDexPC(dex_pc, "suspend_cont");
-
- irb_.CreateCondBr(is_suspend, basic_block_suspend, basic_block_cont, kUnlikely);
-
- irb_.SetInsertPoint(basic_block_suspend);
- if (dex_pc != art::DexFile::kDexNoIndex) {
- EmitUpdateDexPC(dex_pc);
- }
- irb_.Runtime().EmitTestSuspend();
-
- llvm::BasicBlock* basic_block_exception = CreateBasicBlockWithDexPC(dex_pc, "exception");
- llvm::Value* exception_pending = irb_.Runtime().EmitIsExceptionPending();
- irb_.CreateCondBr(exception_pending, basic_block_exception, basic_block_cont, kUnlikely);
-
- irb_.SetInsertPoint(basic_block_exception);
- llvm::Type* ret_type = call_inst.getParent()->getParent()->getReturnType();
- if (ret_type->isVoidTy()) {
- irb_.CreateRetVoid();
- } else {
- // The return value is ignored when there's an exception.
- irb_.CreateRet(llvm::UndefValue::get(ret_type));
- }
-
- irb_.SetInsertPoint(basic_block_cont);
- return;
-}
-
-void GBCExpanderPass::Expand_MarkGCCard(llvm::CallInst& call_inst) {
- irb_.Runtime().EmitMarkGCCard(call_inst.getArgOperand(0), call_inst.getArgOperand(1));
- return;
-}
-
-llvm::Value*
-GBCExpanderPass::Expand_LoadStringFromDexCache(llvm::Value* string_idx_value) {
- uint32_t string_idx =
- llvm::cast<llvm::ConstantInt>(string_idx_value)->getZExtValue();
-
- llvm::Value* string_field_addr = EmitLoadDexCacheStringFieldAddr(string_idx);
-
- return irb_.CreateLoad(string_field_addr, kTBAARuntimeInfo);
-}
-
-llvm::Value*
-GBCExpanderPass::Expand_LoadTypeFromDexCache(llvm::Value* type_idx_value) {
- uint32_t type_idx =
- llvm::cast<llvm::ConstantInt>(type_idx_value)->getZExtValue();
-
- llvm::Value* type_field_addr =
- EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
-
- return irb_.CreateLoad(type_field_addr, kTBAARuntimeInfo);
-}
-
-void GBCExpanderPass::Expand_LockObject(llvm::Value* obj) {
- rtb_.EmitLockObject(obj);
- return;
-}
-
-void GBCExpanderPass::Expand_UnlockObject(llvm::Value* obj) {
- rtb_.EmitUnlockObject(obj);
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_ArrayGet(llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty) {
- llvm::Value* array_elem_addr =
- EmitArrayGEP(array_addr, index_value, elem_jty);
-
- return irb_.CreateLoad(array_elem_addr, kTBAAHeapArray, elem_jty);
-}
-
-void GBCExpanderPass::Expand_ArrayPut(llvm::Value* new_value,
- llvm::Value* array_addr,
- llvm::Value* index_value,
- JType elem_jty) {
- llvm::Value* array_elem_addr =
- EmitArrayGEP(array_addr, index_value, elem_jty);
-
- irb_.CreateStore(new_value, array_elem_addr, kTBAAHeapArray, elem_jty);
-
- return;
-}
-
-void GBCExpanderPass::Expand_FilledNewArray(llvm::CallInst& call_inst) {
- // Most of the codes refer to MethodCompiler::EmitInsn_FilledNewArray
- llvm::Value* array = call_inst.getArgOperand(0);
-
- uint32_t element_jty =
- llvm::cast<llvm::ConstantInt>(call_inst.getArgOperand(1))->getZExtValue();
-
- DCHECK_GT(call_inst.getNumArgOperands(), 2U);
- unsigned num_elements = (call_inst.getNumArgOperands() - 2);
-
- bool is_elem_int_ty = (static_cast<JType>(element_jty) == kInt);
-
- uint32_t alignment;
- llvm::Constant* elem_size;
- llvm::PointerType* field_type;
-
- // NOTE: Currently filled-new-array only supports 'L', '[', and 'I'
- // as the element, thus we are only checking 2 cases: primitive int and
- // non-primitive type.
- if (is_elem_int_ty) {
- alignment = sizeof(int32_t);
- elem_size = irb_.getPtrEquivInt(sizeof(int32_t));
- field_type = irb_.getJIntTy()->getPointerTo();
- } else {
- alignment = irb_.getSizeOfPtrEquivInt();
- elem_size = irb_.getSizeOfPtrEquivIntValue();
- field_type = irb_.getJObjectTy()->getPointerTo();
- }
-
- llvm::Value* data_field_offset =
- irb_.getPtrEquivInt(art::mirror::Array::DataOffset(alignment).Int32Value());
-
- llvm::Value* data_field_addr =
- irb_.CreatePtrDisp(array, data_field_offset, field_type);
-
- for (unsigned i = 0; i < num_elements; ++i) {
- // Values to fill the array begin at the 3rd argument
- llvm::Value* reg_value = call_inst.getArgOperand(2 + i);
-
- irb_.CreateStore(reg_value, data_field_addr, kTBAAHeapArray);
-
- data_field_addr =
- irb_.CreatePtrDisp(data_field_addr, elem_size, field_type);
- }
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_IGetFast(llvm::Value* field_offset_value,
- llvm::Value* /*is_volatile_value*/,
- llvm::Value* object_addr,
- JType field_jty) {
- int field_offset =
- llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
-
- DCHECK_GE(field_offset, 0);
-
- llvm::PointerType* field_type =
- irb_.getJType(field_jty)->getPointerTo();
-
- field_offset_value = irb_.getPtrEquivInt(field_offset);
-
- llvm::Value* field_addr =
- irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
-
- // TODO: Check is_volatile. We need to generate atomic load instruction
- // when is_volatile is true.
- return irb_.CreateLoad(field_addr, kTBAAHeapInstance, field_jty);
-}
-
-void GBCExpanderPass::Expand_IPutFast(llvm::Value* field_offset_value,
- llvm::Value* /* is_volatile_value */,
- llvm::Value* object_addr,
- llvm::Value* new_value,
- JType field_jty) {
- int field_offset =
- llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
-
- DCHECK_GE(field_offset, 0);
-
- llvm::PointerType* field_type =
- irb_.getJType(field_jty)->getPointerTo();
-
- field_offset_value = irb_.getPtrEquivInt(field_offset);
-
- llvm::Value* field_addr =
- irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
-
- // TODO: Check is_volatile. We need to generate atomic store instruction
- // when is_volatile is true.
- irb_.CreateStore(new_value, field_addr, kTBAAHeapInstance, field_jty);
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_SGetFast(llvm::Value* static_storage_addr,
- llvm::Value* field_offset_value,
- llvm::Value* /*is_volatile_value*/,
- JType field_jty) {
- int field_offset =
- llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
-
- DCHECK_GE(field_offset, 0);
-
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
-
- llvm::Value* static_field_addr =
- irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
- irb_.getJType(field_jty)->getPointerTo());
-
- // TODO: Check is_volatile. We need to generate atomic store instruction
- // when is_volatile is true.
- return irb_.CreateLoad(static_field_addr, kTBAAHeapStatic, field_jty);
-}
-
-void GBCExpanderPass::Expand_SPutFast(llvm::Value* static_storage_addr,
- llvm::Value* field_offset_value,
- llvm::Value* /* is_volatile_value */,
- llvm::Value* new_value,
- JType field_jty) {
- int field_offset =
- llvm::cast<llvm::ConstantInt>(field_offset_value)->getSExtValue();
-
- DCHECK_GE(field_offset, 0);
-
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset);
-
- llvm::Value* static_field_addr =
- irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
- irb_.getJType(field_jty)->getPointerTo());
-
- // TODO: Check is_volatile. We need to generate atomic store instruction
- // when is_volatile is true.
- irb_.CreateStore(new_value, static_field_addr, kTBAAHeapStatic, field_jty);
-
- return;
-}
-
-llvm::Value*
-GBCExpanderPass::Expand_LoadDeclaringClassSSB(llvm::Value* method_object_addr) {
- return irb_.LoadFromObjectOffset(method_object_addr,
- art::mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
-}
-
-llvm::Value*
-GBCExpanderPass::Expand_GetSDCalleeMethodObjAddrFast(llvm::Value* callee_method_idx_value) {
- uint32_t callee_method_idx =
- llvm::cast<llvm::ConstantInt>(callee_method_idx_value)->getZExtValue();
-
- return EmitLoadSDCalleeMethodObjectAddr(callee_method_idx);
-}
-
-llvm::Value* GBCExpanderPass::Expand_GetVirtualCalleeMethodObjAddrFast(
- llvm::Value* vtable_idx_value,
- llvm::Value* this_addr) {
- int vtable_idx =
- llvm::cast<llvm::ConstantInt>(vtable_idx_value)->getSExtValue();
-
- return EmitLoadVirtualCalleeMethodObjectAddr(vtable_idx, this_addr);
-}
-
-llvm::Value* GBCExpanderPass::Expand_Invoke(llvm::CallInst& call_inst) {
- // Most of the codes refer to MethodCompiler::EmitInsn_Invoke
- llvm::Value* callee_method_object_addr = call_inst.getArgOperand(0);
- unsigned num_args = call_inst.getNumArgOperands();
- llvm::Type* ret_type = call_inst.getType();
-
- // Determine the function type of the callee method
- std::vector<llvm::Type*> args_type;
- std::vector<llvm::Value*> args;
- for (unsigned i = 0; i < num_args; i++) {
- args.push_back(call_inst.getArgOperand(i));
- args_type.push_back(args[i]->getType());
- }
-
- llvm::FunctionType* callee_method_type =
- llvm::FunctionType::get(ret_type, args_type, false);
-
- llvm::Value* code_addr =
- irb_.LoadFromObjectOffset(callee_method_object_addr,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value(),
- callee_method_type->getPointerTo(),
- kTBAARuntimeInfo);
-
- // Invoke callee
- llvm::Value* retval = irb_.CreateCall(code_addr, args);
-
- return retval;
-}
-
-llvm::Value* GBCExpanderPass::Expand_DivRem(llvm::CallInst& call_inst,
- bool is_div, JType op_jty) {
- llvm::Value* dividend = call_inst.getArgOperand(0);
- llvm::Value* divisor = call_inst.getArgOperand(1);
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- EmitGuard_DivZeroException(dex_pc, divisor, op_jty);
- // Most of the codes refer to MethodCompiler::EmitIntDivRemResultComputation
-
- // Check the special case: MININT / -1 = MININT
- // That case will cause overflow, which is undefined behavior in llvm.
- // So we check the divisor is -1 or not, if the divisor is -1, we do
- // the special path to avoid undefined behavior.
- llvm::Type* op_type = irb_.getJType(op_jty);
- llvm::Value* zero = irb_.getJZero(op_jty);
- llvm::Value* neg_one = llvm::ConstantInt::getSigned(op_type, -1);
-
- llvm::Function* parent = irb_.GetInsertBlock()->getParent();
- llvm::BasicBlock* eq_neg_one = llvm::BasicBlock::Create(context_, "", parent);
- llvm::BasicBlock* ne_neg_one = llvm::BasicBlock::Create(context_, "", parent);
- llvm::BasicBlock* neg_one_cont =
- llvm::BasicBlock::Create(context_, "", parent);
-
- llvm::Value* is_equal_neg_one = irb_.CreateICmpEQ(divisor, neg_one);
- irb_.CreateCondBr(is_equal_neg_one, eq_neg_one, ne_neg_one, kUnlikely);
-
- // If divisor == -1
- irb_.SetInsertPoint(eq_neg_one);
- llvm::Value* eq_result;
- if (is_div) {
- // We can just change from "dividend div -1" to "neg dividend". The sub
- // don't care the sign/unsigned because of two's complement representation.
- // And the behavior is what we want:
- // -(2^n) (2^n)-1
- // MININT < k <= MAXINT -> mul k -1 = -k
- // MININT == k -> mul k -1 = k
- //
- // LLVM use sub to represent 'neg'
- eq_result = irb_.CreateSub(zero, dividend);
- } else {
- // Everything modulo -1 will be 0.
- eq_result = zero;
- }
- irb_.CreateBr(neg_one_cont);
-
- // If divisor != -1, just do the division.
- irb_.SetInsertPoint(ne_neg_one);
- llvm::Value* ne_result;
- if (is_div) {
- ne_result = irb_.CreateSDiv(dividend, divisor);
- } else {
- ne_result = irb_.CreateSRem(dividend, divisor);
- }
- irb_.CreateBr(neg_one_cont);
-
- irb_.SetInsertPoint(neg_one_cont);
- llvm::PHINode* result = irb_.CreatePHI(op_type, 2);
- result->addIncoming(eq_result, eq_neg_one);
- result->addIncoming(ne_result, ne_neg_one);
-
- return result;
-}
-
-void GBCExpanderPass::Expand_AllocaShadowFrame(llvm::Value* num_vregs_value) {
- // Most of the codes refer to MethodCompiler::EmitPrologueAllocShadowFrame and
- // MethodCompiler::EmitPushShadowFrame
- uint16_t num_vregs =
- llvm::cast<llvm::ConstantInt>(num_vregs_value)->getZExtValue();
-
- llvm::StructType* shadow_frame_type =
- irb_.getShadowFrameTy(num_vregs);
-
- // Create allocas at the start of entry block.
- llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
- llvm::BasicBlock* entry_block = &func_->front();
- irb_.SetInsertPoint(&entry_block->front());
-
- shadow_frame_ = irb_.CreateAlloca(shadow_frame_type);
-
- // Alloca a pointer to old shadow frame
- old_shadow_frame_ =
- irb_.CreateAlloca(shadow_frame_type->getElementType(0)->getPointerTo());
-
- irb_.restoreIP(irb_ip_original);
-
- // Push the shadow frame
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* shadow_frame_upcast =
- irb_.CreateConstGEP2_32(shadow_frame_, 0, 0);
-
- llvm::Value* result = rtb_.EmitPushShadowFrame(shadow_frame_upcast,
- method_object_addr,
- num_vregs);
-
- irb_.CreateStore(result, old_shadow_frame_, kTBAARegister);
-
- return;
-}
-
-void GBCExpanderPass::Expand_SetVReg(llvm::Value* entry_idx,
- llvm::Value* value) {
- unsigned vreg_idx = LV2UInt(entry_idx);
- DCHECK_LT(vreg_idx, dex_compilation_unit_->GetCodeItem()->registers_size_);
-
- llvm::Value* vreg_addr = shadow_frame_vreg_addresses_[vreg_idx];
- if (UNLIKELY(vreg_addr == NULL)) {
- DCHECK(shadow_frame_ != NULL);
-
- llvm::Value* gep_index[] = {
- irb_.getInt32(0), // No pointer displacement
- irb_.getInt32(1), // VRegs
- entry_idx // Pointer field
- };
-
- // A shadow frame address must dominate every use in the function so we
- // place it in the entry block right after the allocas.
- llvm::BasicBlock::iterator first_non_alloca = func_->getEntryBlock().begin();
- while (llvm::isa<llvm::AllocaInst>(first_non_alloca)) {
- ++first_non_alloca;
- }
-
- llvm::IRBuilderBase::InsertPoint ip = irb_.saveIP();
- irb_.SetInsertPoint(static_cast<llvm::Instruction*>(first_non_alloca));
- vreg_addr = irb_.CreateGEP(shadow_frame_, gep_index);
- shadow_frame_vreg_addresses_[vreg_idx] = vreg_addr;
- irb_.restoreIP(ip);
- }
-
- irb_.CreateStore(value,
- irb_.CreateBitCast(vreg_addr, value->getType()->getPointerTo()),
- kTBAAShadowFrame);
- return;
-}
-
-void GBCExpanderPass::Expand_PopShadowFrame() {
- if (old_shadow_frame_ == NULL) {
- return;
- }
- rtb_.EmitPopShadowFrame(irb_.CreateLoad(old_shadow_frame_, kTBAARegister));
- return;
-}
-
-void GBCExpanderPass::Expand_UpdateDexPC(llvm::Value* dex_pc_value) {
- irb_.StoreToObjectOffset(shadow_frame_,
- art::ShadowFrame::DexPCOffset(),
- dex_pc_value,
- kTBAAShadowFrame);
- return;
-}
-
-void GBCExpanderPass::InsertStackOverflowCheck(llvm::Function& func) {
- // All alloca instructions are generated in the first basic block of the
- // function, and there are no alloca instructions after the first non-alloca
- // instruction.
-
- llvm::BasicBlock* first_basic_block = &func.front();
-
- // Look for first non-alloca instruction
- llvm::BasicBlock::iterator first_non_alloca = first_basic_block->begin();
- while (llvm::isa<llvm::AllocaInst>(first_non_alloca)) {
- ++first_non_alloca;
- }
-
- irb_.SetInsertPoint(first_non_alloca);
-
- // Insert stack overflow check codes before first_non_alloca (i.e., after all
- // alloca instructions)
- EmitStackOverflowCheck(&*first_non_alloca);
-
- irb_.Runtime().EmitTestSuspend();
-
- llvm::BasicBlock* next_basic_block = irb_.GetInsertBlock();
- if (next_basic_block != first_basic_block) {
- // Splice the rest of the instruction to the continuing basic block
- next_basic_block->getInstList().splice(
- irb_.GetInsertPoint(), first_basic_block->getInstList(),
- first_non_alloca, first_basic_block->end());
-
- // Rewrite the basic block
- RewriteBasicBlock(next_basic_block);
-
- // Update the phi-instructions in the successor basic block
- UpdatePhiInstruction(first_basic_block, irb_.GetInsertBlock());
- }
-
- // We have changed the basic block
- changed_ = true;
-}
-
-// ==== High-level intrinsic expander ==========================================
-
-llvm::Value* GBCExpanderPass::Expand_FPCompare(llvm::Value* src1_value,
- llvm::Value* src2_value,
- bool gt_bias) {
- llvm::Value* cmp_eq = irb_.CreateFCmpOEQ(src1_value, src2_value);
- llvm::Value* cmp_lt;
-
- if (gt_bias) {
- cmp_lt = irb_.CreateFCmpOLT(src1_value, src2_value);
- } else {
- cmp_lt = irb_.CreateFCmpULT(src1_value, src2_value);
- }
-
- return EmitCompareResultSelection(cmp_eq, cmp_lt);
-}
-
-llvm::Value* GBCExpanderPass::Expand_LongCompare(llvm::Value* src1_value, llvm::Value* src2_value) {
- llvm::Value* cmp_eq = irb_.CreateICmpEQ(src1_value, src2_value);
- llvm::Value* cmp_lt = irb_.CreateICmpSLT(src1_value, src2_value);
-
- return EmitCompareResultSelection(cmp_eq, cmp_lt);
-}
-
-llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq,
- llvm::Value* cmp_lt) {
- llvm::Constant* zero = irb_.getJInt(0);
- llvm::Constant* pos1 = irb_.getJInt(1);
- llvm::Constant* neg1 = irb_.getJInt(-1);
-
- llvm::Value* result_lt = irb_.CreateSelect(cmp_lt, neg1, pos1);
- llvm::Value* result_eq = irb_.CreateSelect(cmp_eq, zero, result_lt);
-
- return result_eq;
-}
-
-llvm::Value* GBCExpanderPass::Expand_IntegerShift(llvm::Value* src1_value,
- llvm::Value* src2_value,
- IntegerShiftKind kind,
- JType op_jty) {
- DCHECK(op_jty == kInt || op_jty == kLong);
-
- // Mask and zero-extend RHS properly
- if (op_jty == kInt) {
- src2_value = irb_.CreateAnd(src2_value, 0x1f);
- } else {
- llvm::Value* masked_src2_value = irb_.CreateAnd(src2_value, 0x3f);
- src2_value = irb_.CreateZExt(masked_src2_value, irb_.getJLongTy());
- }
-
- // Create integer shift llvm instruction
- switch (kind) {
- case kIntegerSHL:
- return irb_.CreateShl(src1_value, src2_value);
-
- case kIntegerSHR:
- return irb_.CreateAShr(src1_value, src2_value);
-
- case kIntegerUSHR:
- return irb_.CreateLShr(src1_value, src2_value);
-
- default:
- LOG(FATAL) << "Unknown integer shift kind: " << kind;
- return NULL;
- }
-}
-
-llvm::Value* GBCExpanderPass::SignOrZeroExtendCat1Types(llvm::Value* value, JType jty) {
- switch (jty) {
- case kBoolean:
- case kChar:
- return irb_.CreateZExt(value, irb_.getJType(kInt));
- case kByte:
- case kShort:
- return irb_.CreateSExt(value, irb_.getJType(kInt));
- case kVoid:
- case kInt:
- case kLong:
- case kFloat:
- case kDouble:
- case kObject:
- return value; // Nothing to do.
- default:
- LOG(FATAL) << "Unknown java type: " << jty;
- return NULL;
- }
-}
-
-llvm::Value* GBCExpanderPass::TruncateCat1Types(llvm::Value* value, JType jty) {
- switch (jty) {
- case kBoolean:
- case kChar:
- case kByte:
- case kShort:
- return irb_.CreateTrunc(value, irb_.getJType(jty));
- case kVoid:
- case kInt:
- case kLong:
- case kFloat:
- case kDouble:
- case kObject:
- return value; // Nothing to do.
- default:
- LOG(FATAL) << "Unknown java type: " << jty;
- return NULL;
- }
-}
-
-llvm::Value* GBCExpanderPass::Expand_HLArrayGet(llvm::CallInst& call_inst,
- JType elem_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* array_addr = call_inst.getArgOperand(1);
- llvm::Value* index_value = call_inst.getArgOperand(2);
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
- EmitGuard_ArrayIndexOutOfBoundsException(dex_pc, array_addr, index_value,
- opt_flags);
-
- llvm::Value* array_elem_addr = EmitArrayGEP(array_addr, index_value, elem_jty);
-
- llvm::Value* array_elem_value = irb_.CreateLoad(array_elem_addr, kTBAAHeapArray, elem_jty);
-
- return SignOrZeroExtendCat1Types(array_elem_value, elem_jty);
-}
-
-
-void GBCExpanderPass::Expand_HLArrayPut(llvm::CallInst& call_inst,
- JType elem_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* new_value = call_inst.getArgOperand(1);
- llvm::Value* array_addr = call_inst.getArgOperand(2);
- llvm::Value* index_value = call_inst.getArgOperand(3);
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
- EmitGuard_ArrayIndexOutOfBoundsException(dex_pc, array_addr, index_value,
- opt_flags);
-
- new_value = TruncateCat1Types(new_value, elem_jty);
-
- llvm::Value* array_elem_addr = EmitArrayGEP(array_addr, index_value, elem_jty);
-
- if (elem_jty == kObject) { // If put an object, check the type, and mark GC card table.
- llvm::Function* runtime_func = irb_.GetRuntime(CheckPutArrayElement);
-
- irb_.CreateCall2(runtime_func, new_value, array_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- EmitMarkGCCard(new_value, array_addr);
- }
-
- irb_.CreateStore(new_value, array_elem_addr, kTBAAHeapArray, elem_jty);
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_HLIGet(llvm::CallInst& call_inst,
- JType field_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* object_addr = call_inst.getArgOperand(1);
- uint32_t field_idx = LV2UInt(call_inst.getArgOperand(2));
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
-
- llvm::Value* field_value;
-
- art::MemberOffset field_offset(0u);
- bool is_volatile;
- bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, false, &field_offset, &is_volatile);
-
- if (!is_fast_path) {
- llvm::Function* runtime_func;
-
- if (field_jty == kObject) {
- runtime_func = irb_.GetRuntime(GetObjectInstance);
- } else if (field_jty == kLong || field_jty == kDouble) {
- runtime_func = irb_.GetRuntime(Get64Instance);
- } else {
- runtime_func = irb_.GetRuntime(Get32Instance);
- }
-
- llvm::ConstantInt* field_idx_value = irb_.getInt32(field_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- EmitUpdateDexPC(dex_pc);
-
- field_value = irb_.CreateCall3(runtime_func, field_idx_value,
- method_object_addr, object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- if (field_jty == kFloat || field_jty == kDouble) {
- field_value = irb_.CreateBitCast(field_value, irb_.getJType(field_jty));
- }
- } else {
- DCHECK_GE(field_offset.Int32Value(), 0);
-
- llvm::PointerType* field_type =
- irb_.getJType(field_jty)->getPointerTo();
-
- llvm::ConstantInt* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
-
- llvm::Value* field_addr =
- irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
-
- field_value = irb_.CreateLoad(field_addr, kTBAAHeapInstance, field_jty);
- field_value = SignOrZeroExtendCat1Types(field_value, field_jty);
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kLoadAny);
- }
- }
-
- return field_value;
-}
-
-void GBCExpanderPass::Expand_HLIPut(llvm::CallInst& call_inst,
- JType field_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* new_value = call_inst.getArgOperand(1);
- llvm::Value* object_addr = call_inst.getArgOperand(2);
- uint32_t field_idx = LV2UInt(call_inst.getArgOperand(3));
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
-
- art::MemberOffset field_offset(0u);
- bool is_volatile;
- bool is_fast_path = driver_->ComputeInstanceFieldInfo(
- field_idx, dex_compilation_unit_, true, &field_offset, &is_volatile);
-
- if (!is_fast_path) {
- llvm::Function* runtime_func;
-
- if (field_jty == kFloat) {
- new_value = irb_.CreateBitCast(new_value, irb_.getJType(kInt));
- } else if (field_jty == kDouble) {
- new_value = irb_.CreateBitCast(new_value, irb_.getJType(kLong));
- }
-
- if (field_jty == kObject) {
- runtime_func = irb_.GetRuntime(SetObjectInstance);
- } else if (field_jty == kLong || field_jty == kDouble) {
- runtime_func = irb_.GetRuntime(Set64Instance);
- } else {
- runtime_func = irb_.GetRuntime(Set32Instance);
- }
-
- llvm::Value* field_idx_value = irb_.getInt32(field_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.CreateCall4(runtime_func, field_idx_value,
- method_object_addr, object_addr, new_value);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- } else {
- DCHECK_GE(field_offset.Int32Value(), 0);
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kAnyStore);
- }
-
- llvm::PointerType* field_type =
- irb_.getJType(field_jty)->getPointerTo();
-
- llvm::Value* field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
-
- llvm::Value* field_addr =
- irb_.CreatePtrDisp(object_addr, field_offset_value, field_type);
-
- new_value = TruncateCat1Types(new_value, field_jty);
- irb_.CreateStore(new_value, field_addr, kTBAAHeapInstance, field_jty);
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kAnyAny);
- }
-
- if (field_jty == kObject) { // If put an object, mark the GC card table.
- EmitMarkGCCard(new_value, object_addr);
- }
- }
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::EmitLoadConstantClass(uint32_t dex_pc,
- uint32_t type_idx) {
- if (!driver_->CanAccessTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
- *dex_compilation_unit_->GetDexFile(), type_idx)) {
- llvm::Value* type_idx_value = irb_.getInt32(type_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- llvm::Function* runtime_func = irb_.GetRuntime(InitializeTypeAndVerifyAccess);
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* type_object_addr =
- irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return type_object_addr;
-
- } else {
- // Try to load the class (type) object from the test cache.
- llvm::Value* type_field_addr =
- EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
-
- llvm::Value* type_object_addr = irb_.CreateLoad(type_field_addr, kTBAARuntimeInfo);
-
- if (driver_->CanAssumeTypeIsPresentInDexCache(*dex_compilation_unit_->GetDexFile(), type_idx)) {
- return type_object_addr;
- }
-
- llvm::BasicBlock* block_original = irb_.GetInsertBlock();
-
- // Test whether class (type) object is in the dex cache or not
- llvm::Value* equal_null =
- irb_.CreateICmpEQ(type_object_addr, irb_.getJNull());
-
- llvm::BasicBlock* block_cont =
- CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- llvm::BasicBlock* block_load_class =
- CreateBasicBlockWithDexPC(dex_pc, "load_class");
-
- irb_.CreateCondBr(equal_null, block_load_class, block_cont, kUnlikely);
-
- // Failback routine to load the class object
- irb_.SetInsertPoint(block_load_class);
-
- llvm::Function* runtime_func = irb_.GetRuntime(InitializeType);
-
- llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* loaded_type_object_addr =
- irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- llvm::BasicBlock* block_after_load_class = irb_.GetInsertBlock();
-
- irb_.CreateBr(block_cont);
-
- // Now the class object must be loaded
- irb_.SetInsertPoint(block_cont);
-
- llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
-
- phi->addIncoming(type_object_addr, block_original);
- phi->addIncoming(loaded_type_object_addr, block_after_load_class);
-
- return phi;
- }
-}
-
-llvm::Value* GBCExpanderPass::EmitLoadStaticStorage(uint32_t dex_pc,
- uint32_t type_idx) {
- llvm::BasicBlock* block_load_static =
- CreateBasicBlockWithDexPC(dex_pc, "load_static");
-
- llvm::BasicBlock* block_check_init = CreateBasicBlockWithDexPC(dex_pc, "init");
- llvm::BasicBlock* block_cont = CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- // Load static storage from dex cache
- llvm::Value* storage_field_addr = EmitLoadDexCacheResolvedTypeFieldAddr(type_idx);
-
- llvm::Value* storage_object_addr = irb_.CreateLoad(storage_field_addr, kTBAARuntimeInfo);
-
- // Test: Is the class resolved?
- llvm::Value* equal_null = irb_.CreateICmpEQ(storage_object_addr, irb_.getJNull());
-
- irb_.CreateCondBr(equal_null, block_load_static, block_check_init, kUnlikely);
-
- // storage_object_addr != null, so check if its initialized.
- irb_.SetInsertPoint(block_check_init);
-
- llvm::Value* class_status =
- irb_.LoadFromObjectOffset(storage_object_addr,
- art::mirror::Class::StatusOffset().Int32Value(),
- irb_.getJIntTy(), kTBAAHeapInstance);
-
- llvm::Value* is_not_initialized =
- irb_.CreateICmpULT(class_status, irb_.getInt32(art::mirror::Class::kStatusInitialized));
-
- irb_.CreateCondBr(is_not_initialized, block_load_static, block_cont, kUnlikely);
-
- // Failback routine to load the class object
- irb_.SetInsertPoint(block_load_static);
-
- llvm::Function* runtime_func = irb_.GetRuntime(InitializeStaticStorage);
-
- llvm::Constant* type_idx_value = irb_.getInt32(type_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* loaded_storage_object_addr =
- irb_.CreateCall3(runtime_func, type_idx_value, method_object_addr, thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- llvm::BasicBlock* block_after_load_static = irb_.GetInsertBlock();
-
- irb_.CreateBr(block_cont);
-
- // Now the class object must be loaded
- irb_.SetInsertPoint(block_cont);
-
- llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
-
- phi->addIncoming(storage_object_addr, block_check_init);
- phi->addIncoming(loaded_storage_object_addr, block_after_load_static);
-
- // Ensure load of status and load of value don't re-order.
- irb_.CreateMemoryBarrier(art::kLoadAny);
-
- return phi;
-}
-
-llvm::Value* GBCExpanderPass::Expand_HLSget(llvm::CallInst& call_inst,
- JType field_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
-
- art::MemberOffset field_offset(0u);
- uint32_t ssb_index;
- bool is_referrers_class;
- bool is_volatile;
- bool is_initialized;
-
- bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, false,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile, &is_initialized);
-
- llvm::Value* static_field_value;
-
- if (!is_fast_path) {
- llvm::Function* runtime_func;
-
- if (field_jty == kObject) {
- runtime_func = irb_.GetRuntime(GetObjectStatic);
- } else if (field_jty == kLong || field_jty == kDouble) {
- runtime_func = irb_.GetRuntime(Get64Static);
- } else {
- runtime_func = irb_.GetRuntime(Get32Static);
- }
-
- llvm::Constant* field_idx_value = irb_.getInt32(field_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- EmitUpdateDexPC(dex_pc);
-
- static_field_value =
- irb_.CreateCall2(runtime_func, field_idx_value, method_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- if (field_jty == kFloat || field_jty == kDouble) {
- static_field_value = irb_.CreateBitCast(static_field_value, irb_.getJType(field_jty));
- }
- } else {
- DCHECK_GE(field_offset.Int32Value(), 0);
-
- llvm::Value* static_storage_addr = NULL;
-
- if (is_referrers_class) {
- // Fast path, static storage base is this method's class
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- static_storage_addr =
- irb_.LoadFromObjectOffset(method_object_addr,
- art::mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized
- DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
- static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
- }
-
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
-
- llvm::Value* static_field_addr =
- irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
- irb_.getJType(field_jty)->getPointerTo());
-
- static_field_value = irb_.CreateLoad(static_field_addr, kTBAAHeapStatic, field_jty);
- static_field_value = SignOrZeroExtendCat1Types(static_field_value, field_jty);
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kLoadAny);
- }
- }
-
- return static_field_value;
-}
-
-void GBCExpanderPass::Expand_HLSput(llvm::CallInst& call_inst,
- JType field_jty) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t field_idx = LV2UInt(call_inst.getArgOperand(0));
- llvm::Value* new_value = call_inst.getArgOperand(1);
-
- if (field_jty == kFloat || field_jty == kDouble) {
- new_value = irb_.CreateBitCast(new_value, irb_.getJType(field_jty));
- }
-
- art::MemberOffset field_offset(0u);
- uint32_t ssb_index;
- bool is_referrers_class;
- bool is_volatile;
- bool is_initialized;
-
- bool is_fast_path = driver_->ComputeStaticFieldInfo(
- field_idx, dex_compilation_unit_, true,
- &field_offset, &ssb_index, &is_referrers_class, &is_volatile, &is_initialized);
-
- if (!is_fast_path) {
- llvm::Function* runtime_func;
-
- if (field_jty == kObject) {
- runtime_func = irb_.GetRuntime(SetObjectStatic);
- } else if (field_jty == kLong || field_jty == kDouble) {
- runtime_func = irb_.GetRuntime(Set64Static);
- } else {
- runtime_func = irb_.GetRuntime(Set32Static);
- }
-
- if (field_jty == kFloat) {
- new_value = irb_.CreateBitCast(new_value, irb_.getJType(kInt));
- } else if (field_jty == kDouble) {
- new_value = irb_.CreateBitCast(new_value, irb_.getJType(kLong));
- }
-
- llvm::Constant* field_idx_value = irb_.getInt32(field_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.CreateCall3(runtime_func, field_idx_value,
- method_object_addr, new_value);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- } else {
- DCHECK_GE(field_offset.Int32Value(), 0);
-
- llvm::Value* static_storage_addr = NULL;
-
- if (is_referrers_class) {
- // Fast path, static storage base is this method's class
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- static_storage_addr =
- irb_.LoadFromObjectOffset(method_object_addr,
- art::mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAAConstJObject);
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized
- DCHECK_NE(ssb_index, art::DexFile::kDexNoIndex);
- static_storage_addr = EmitLoadStaticStorage(dex_pc, ssb_index);
- }
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kAnyStore);
- }
-
- llvm::Value* static_field_offset_value = irb_.getPtrEquivInt(field_offset.Int32Value());
-
- llvm::Value* static_field_addr =
- irb_.CreatePtrDisp(static_storage_addr, static_field_offset_value,
- irb_.getJType(field_jty)->getPointerTo());
-
- new_value = TruncateCat1Types(new_value, field_jty);
- irb_.CreateStore(new_value, static_field_addr, kTBAAHeapStatic, field_jty);
-
- if (is_volatile) {
- irb_.CreateMemoryBarrier(art::kAnyAny);
- }
-
- if (field_jty == kObject) { // If put an object, mark the GC card table.
- EmitMarkGCCard(new_value, static_storage_addr);
- }
- }
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_ConstString(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t string_idx = LV2UInt(call_inst.getArgOperand(0));
-
- llvm::Value* string_field_addr = EmitLoadDexCacheStringFieldAddr(string_idx);
-
- llvm::Value* string_addr = irb_.CreateLoad(string_field_addr, kTBAARuntimeInfo);
-
- if (!driver_->CanAssumeStringIsPresentInDexCache(*dex_compilation_unit_->GetDexFile(),
- string_idx)) {
- llvm::BasicBlock* block_str_exist =
- CreateBasicBlockWithDexPC(dex_pc, "str_exist");
-
- llvm::BasicBlock* block_str_resolve =
- CreateBasicBlockWithDexPC(dex_pc, "str_resolve");
-
- llvm::BasicBlock* block_cont =
- CreateBasicBlockWithDexPC(dex_pc, "str_cont");
-
- // Test: Is the string resolved and in the dex cache?
- llvm::Value* equal_null = irb_.CreateICmpEQ(string_addr, irb_.getJNull());
-
- irb_.CreateCondBr(equal_null, block_str_resolve, block_str_exist, kUnlikely);
-
- // String is resolved, go to next basic block.
- irb_.SetInsertPoint(block_str_exist);
- irb_.CreateBr(block_cont);
-
- // String is not resolved yet, resolve it now.
- irb_.SetInsertPoint(block_str_resolve);
-
- llvm::Function* runtime_func = irb_.GetRuntime(ResolveString);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* string_idx_value = irb_.getInt32(string_idx);
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* result = irb_.CreateCall2(runtime_func, method_object_addr,
- string_idx_value);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- irb_.CreateBr(block_cont);
-
-
- llvm::BasicBlock* block_pre_cont = irb_.GetInsertBlock();
-
- irb_.SetInsertPoint(block_cont);
-
- llvm::PHINode* phi = irb_.CreatePHI(irb_.getJObjectTy(), 2);
-
- phi->addIncoming(string_addr, block_str_exist);
- phi->addIncoming(result, block_pre_cont);
-
- string_addr = phi;
- }
-
- return string_addr;
-}
-
-llvm::Value* GBCExpanderPass::Expand_ConstClass(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
-
- llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
-
- return type_object_addr;
-}
-
-void GBCExpanderPass::Expand_MonitorEnter(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* object_addr = call_inst.getArgOperand(1);
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.Runtime().EmitLockObject(object_addr);
-
- return;
-}
-
-void GBCExpanderPass::Expand_MonitorExit(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* object_addr = call_inst.getArgOperand(1);
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, object_addr, opt_flags);
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.Runtime().EmitUnlockObject(object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return;
-}
-
-void GBCExpanderPass::Expand_HLCheckCast(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
- llvm::Value* object_addr = call_inst.getArgOperand(1);
-
- llvm::BasicBlock* block_test_class =
- CreateBasicBlockWithDexPC(dex_pc, "test_class");
-
- llvm::BasicBlock* block_test_sub_class =
- CreateBasicBlockWithDexPC(dex_pc, "test_sub_class");
-
- llvm::BasicBlock* block_cont =
- CreateBasicBlockWithDexPC(dex_pc, "checkcast_cont");
-
- // Test: Is the reference equal to null? Act as no-op when it is null.
- llvm::Value* equal_null = irb_.CreateICmpEQ(object_addr, irb_.getJNull());
-
- irb_.CreateCondBr(equal_null, block_cont, block_test_class, kUnlikely);
-
- // Test: Is the object instantiated from the given class?
- irb_.SetInsertPoint(block_test_class);
- llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
- DCHECK_EQ(art::mirror::Object::ClassOffset().Int32Value(), 0);
-
- llvm::PointerType* jobject_ptr_ty = irb_.getJObjectTy();
-
- llvm::Value* object_type_field_addr =
- irb_.CreateBitCast(object_addr, jobject_ptr_ty->getPointerTo());
-
- llvm::Value* object_type_object_addr =
- irb_.CreateLoad(object_type_field_addr, kTBAAConstJObject);
-
- llvm::Value* equal_class =
- irb_.CreateICmpEQ(type_object_addr, object_type_object_addr);
-
- irb_.CreateCondBr(equal_class, block_cont, block_test_sub_class, kLikely);
-
- // Test: Is the object instantiated from the subclass of the given class?
- irb_.SetInsertPoint(block_test_sub_class);
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.CreateCall2(irb_.GetRuntime(CheckCast),
- type_object_addr, object_type_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- irb_.CreateBr(block_cont);
-
- irb_.SetInsertPoint(block_cont);
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::Expand_InstanceOf(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
- llvm::Value* object_addr = call_inst.getArgOperand(1);
-
- llvm::BasicBlock* block_nullp =
- CreateBasicBlockWithDexPC(dex_pc, "nullp");
-
- llvm::BasicBlock* block_test_class =
- CreateBasicBlockWithDexPC(dex_pc, "test_class");
-
- llvm::BasicBlock* block_class_equals =
- CreateBasicBlockWithDexPC(dex_pc, "class_eq");
-
- llvm::BasicBlock* block_test_sub_class =
- CreateBasicBlockWithDexPC(dex_pc, "test_sub_class");
-
- llvm::BasicBlock* block_cont =
- CreateBasicBlockWithDexPC(dex_pc, "instance_of_cont");
-
- // Overview of the following code :
- // We check for null, if so, then false, otherwise check for class == . If so
- // then true, otherwise do callout slowpath.
- //
- // Test: Is the reference equal to null? Set 0 when it is null.
- llvm::Value* equal_null = irb_.CreateICmpEQ(object_addr, irb_.getJNull());
-
- irb_.CreateCondBr(equal_null, block_nullp, block_test_class, kUnlikely);
-
- irb_.SetInsertPoint(block_nullp);
- irb_.CreateBr(block_cont);
-
- // Test: Is the object instantiated from the given class?
- irb_.SetInsertPoint(block_test_class);
- llvm::Value* type_object_addr = EmitLoadConstantClass(dex_pc, type_idx);
- DCHECK_EQ(art::mirror::Object::ClassOffset().Int32Value(), 0);
-
- llvm::PointerType* jobject_ptr_ty = irb_.getJObjectTy();
-
- llvm::Value* object_type_field_addr =
- irb_.CreateBitCast(object_addr, jobject_ptr_ty->getPointerTo());
-
- llvm::Value* object_type_object_addr =
- irb_.CreateLoad(object_type_field_addr, kTBAAConstJObject);
-
- llvm::Value* equal_class =
- irb_.CreateICmpEQ(type_object_addr, object_type_object_addr);
-
- irb_.CreateCondBr(equal_class, block_class_equals, block_test_sub_class, kLikely);
-
- irb_.SetInsertPoint(block_class_equals);
- irb_.CreateBr(block_cont);
-
- // Test: Is the object instantiated from the subclass of the given class?
- irb_.SetInsertPoint(block_test_sub_class);
- llvm::Value* result =
- irb_.CreateCall2(irb_.GetRuntime(IsAssignable),
- type_object_addr, object_type_object_addr);
- irb_.CreateBr(block_cont);
-
- irb_.SetInsertPoint(block_cont);
-
- llvm::PHINode* phi = irb_.CreatePHI(irb_.getJIntTy(), 3);
-
- phi->addIncoming(irb_.getJInt(0), block_nullp);
- phi->addIncoming(irb_.getJInt(1), block_class_equals);
- phi->addIncoming(result, block_test_sub_class);
-
- return phi;
-}
-
-llvm::Value* GBCExpanderPass::Expand_NewInstance(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
-
- llvm::Function* runtime_func;
- if (driver_->CanAccessInstantiableTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
- *dex_compilation_unit_->GetDexFile(),
- type_idx)) {
- runtime_func = irb_.GetRuntime(AllocObject);
- } else {
- runtime_func = irb_.GetRuntime(AllocObjectWithAccessCheck);
- }
-
- llvm::Constant* type_index_value = irb_.getInt32(type_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* object_addr =
- irb_.CreateCall3(runtime_func, type_index_value, method_object_addr, thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return object_addr;
-}
-
-llvm::Value* GBCExpanderPass::Expand_HLInvoke(llvm::CallInst& call_inst) {
- art::InvokeType invoke_type = static_cast<art::InvokeType>(LV2UInt(call_inst.getArgOperand(0)));
- bool is_static = (invoke_type == art::kStatic);
-
- if (!is_static) {
- // Test: Is *this* parameter equal to null?
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- llvm::Value* this_addr = call_inst.getArgOperand(3);
- int opt_flags = LV2UInt(call_inst.getArgOperand(2));
-
- EmitGuard_NullPointerException(dex_pc, this_addr, opt_flags);
- }
-
- llvm::Value* result = NULL;
- if (EmitIntrinsic(call_inst, &result)) {
- return result;
- }
-
- return EmitInvoke(call_inst);
-}
-
-llvm::Value* GBCExpanderPass::Expand_OptArrayLength(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- // Get the array object address
- llvm::Value* array_addr = call_inst.getArgOperand(1);
- int opt_flags = LV2UInt(call_inst.getArgOperand(0));
-
- EmitGuard_NullPointerException(dex_pc, array_addr, opt_flags);
-
- // Get the array length and store it to the register
- return EmitLoadArrayLength(array_addr);
-}
-
-llvm::Value* GBCExpanderPass::Expand_NewArray(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(0));
- llvm::Value* length = call_inst.getArgOperand(1);
-
- return EmitAllocNewArray(dex_pc, length, type_idx, false);
-}
-
-llvm::Value* GBCExpanderPass::Expand_HLFilledNewArray(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- uint32_t type_idx = LV2UInt(call_inst.getArgOperand(1));
- uint32_t length = call_inst.getNumArgOperands() - 3;
-
- llvm::Value* object_addr =
- EmitAllocNewArray(dex_pc, irb_.getInt32(length), type_idx, true);
-
- if (length > 0) {
- // Check for the element type
- uint32_t type_desc_len = 0;
- const char* type_desc =
- dex_compilation_unit_->GetDexFile()->StringByTypeIdx(type_idx, &type_desc_len);
-
- DCHECK_GE(type_desc_len, 2u); // should be guaranteed by verifier
- DCHECK_EQ(type_desc[0], '['); // should be guaranteed by verifier
- bool is_elem_int_ty = (type_desc[1] == 'I');
-
- uint32_t alignment;
- llvm::Constant* elem_size;
- llvm::PointerType* field_type;
-
- // NOTE: Currently filled-new-array only supports 'L', '[', and 'I'
- // as the element, thus we are only checking 2 cases: primitive int and
- // non-primitive type.
- if (is_elem_int_ty) {
- alignment = sizeof(int32_t);
- elem_size = irb_.getPtrEquivInt(sizeof(int32_t));
- field_type = irb_.getJIntTy()->getPointerTo();
- } else {
- alignment = irb_.getSizeOfPtrEquivInt();
- elem_size = irb_.getSizeOfPtrEquivIntValue();
- field_type = irb_.getJObjectTy()->getPointerTo();
- }
-
- llvm::Value* data_field_offset =
- irb_.getPtrEquivInt(art::mirror::Array::DataOffset(alignment).Int32Value());
-
- llvm::Value* data_field_addr =
- irb_.CreatePtrDisp(object_addr, data_field_offset, field_type);
-
- // TODO: Tune this code. Currently we are generating one instruction for
- // one element which may be very space consuming. Maybe changing to use
- // memcpy may help; however, since we can't guarantee that the alloca of
- // dalvik register are continuous, we can't perform such optimization yet.
- for (uint32_t i = 0; i < length; ++i) {
- llvm::Value* reg_value = call_inst.getArgOperand(i+3);
-
- irb_.CreateStore(reg_value, data_field_addr, kTBAAHeapArray);
-
- data_field_addr =
- irb_.CreatePtrDisp(data_field_addr, elem_size, field_type);
- }
- }
-
- return object_addr;
-}
-
-void GBCExpanderPass::Expand_HLFillArrayData(llvm::CallInst& call_inst) {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
- int32_t payload_offset = static_cast<int32_t>(dex_pc) +
- LV2SInt(call_inst.getArgOperand(0));
- llvm::Value* array_addr = call_inst.getArgOperand(1);
-
- const art::Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const art::Instruction::ArrayDataPayload*>(
- dex_compilation_unit_->GetCodeItem()->insns_ + payload_offset);
-
- if (payload->element_count == 0) {
- // When the number of the elements in the payload is zero, we don't have
- // to copy any numbers. However, we should check whether the array object
- // address is equal to null or not.
- EmitGuard_NullPointerException(dex_pc, array_addr, 0);
- } else {
- // To save the code size, we are going to call the runtime function to
- // copy the content from DexFile.
-
- // NOTE: We will check for the NullPointerException in the runtime.
-
- llvm::Function* runtime_func = irb_.GetRuntime(FillArrayData);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.CreateCall4(runtime_func,
- method_object_addr, irb_.getInt32(dex_pc),
- array_addr, irb_.getInt32(payload_offset));
-
- EmitGuard_ExceptionLandingPad(dex_pc);
- }
-
- return;
-}
-
-llvm::Value* GBCExpanderPass::EmitAllocNewArray(uint32_t dex_pc,
- llvm::Value* array_length_value,
- uint32_t type_idx,
- bool is_filled_new_array) {
- llvm::Function* runtime_func;
-
- bool skip_access_check =
- driver_->CanAccessTypeWithoutChecks(dex_compilation_unit_->GetDexMethodIndex(),
- *dex_compilation_unit_->GetDexFile(), type_idx);
-
-
- if (is_filled_new_array) {
- runtime_func = skip_access_check ?
- irb_.GetRuntime(CheckAndAllocArray) :
- irb_.GetRuntime(CheckAndAllocArrayWithAccessCheck);
- } else {
- runtime_func = skip_access_check ?
- irb_.GetRuntime(AllocArray) :
- irb_.GetRuntime(AllocArrayWithAccessCheck);
- }
-
- llvm::Constant* type_index_value = irb_.getInt32(type_idx);
-
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* object_addr =
- irb_.CreateCall4(runtime_func, type_index_value, method_object_addr,
- array_length_value, thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return object_addr;
-}
-
-llvm::Value* GBCExpanderPass::
-EmitCallRuntimeForCalleeMethodObjectAddr(uint32_t callee_method_idx,
- art::InvokeType invoke_type,
- llvm::Value* this_addr,
- uint32_t dex_pc,
- bool is_fast_path) {
- llvm::Function* runtime_func = NULL;
-
- switch (invoke_type) {
- case art::kStatic:
- runtime_func = irb_.GetRuntime(FindStaticMethodWithAccessCheck);
- break;
-
- case art::kDirect:
- runtime_func = irb_.GetRuntime(FindDirectMethodWithAccessCheck);
- break;
-
- case art::kVirtual:
- runtime_func = irb_.GetRuntime(FindVirtualMethodWithAccessCheck);
- break;
-
- case art::kSuper:
- runtime_func = irb_.GetRuntime(FindSuperMethodWithAccessCheck);
- break;
-
- case art::kInterface:
- if (is_fast_path) {
- runtime_func = irb_.GetRuntime(FindInterfaceMethod);
- } else {
- runtime_func = irb_.GetRuntime(FindInterfaceMethodWithAccessCheck);
- }
- break;
- }
-
- llvm::Value* callee_method_idx_value = irb_.getInt32(callee_method_idx);
-
- if (this_addr == NULL) {
- DCHECK_EQ(invoke_type, art::kStatic);
- this_addr = irb_.getJNull();
- }
-
- llvm::Value* caller_method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* thread_object_addr = irb_.Runtime().EmitGetCurrentThread();
-
- EmitUpdateDexPC(dex_pc);
-
- llvm::Value* callee_method_object_addr =
- irb_.CreateCall4(runtime_func,
- callee_method_idx_value,
- this_addr,
- caller_method_object_addr,
- thread_object_addr);
-
- EmitGuard_ExceptionLandingPad(dex_pc);
-
- return callee_method_object_addr;
-}
-
-void GBCExpanderPass::EmitMarkGCCard(llvm::Value* value, llvm::Value* target_addr) {
- // Using runtime support, let the target can override by InlineAssembly.
- irb_.Runtime().EmitMarkGCCard(value, target_addr);
-}
-
-void GBCExpanderPass::EmitUpdateDexPC(uint32_t dex_pc) {
- if (shadow_frame_ == NULL) {
- return;
- }
- irb_.StoreToObjectOffset(shadow_frame_,
- art::ShadowFrame::DexPCOffset(),
- irb_.getInt32(dex_pc),
- kTBAAShadowFrame);
-}
-
-void GBCExpanderPass::EmitGuard_DivZeroException(uint32_t dex_pc,
- llvm::Value* denominator,
- JType op_jty) {
- DCHECK(op_jty == kInt || op_jty == kLong) << op_jty;
-
- llvm::Constant* zero = irb_.getJZero(op_jty);
-
- llvm::Value* equal_zero = irb_.CreateICmpEQ(denominator, zero);
-
- llvm::BasicBlock* block_exception = CreateBasicBlockWithDexPC(dex_pc, "div0");
-
- llvm::BasicBlock* block_continue = CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- irb_.CreateCondBr(equal_zero, block_exception, block_continue, kUnlikely);
-
- irb_.SetInsertPoint(block_exception);
- EmitUpdateDexPC(dex_pc);
- irb_.CreateCall(irb_.GetRuntime(ThrowDivZeroException));
- EmitBranchExceptionLandingPad(dex_pc);
-
- irb_.SetInsertPoint(block_continue);
-}
-
-void GBCExpanderPass::EmitGuard_NullPointerException(uint32_t dex_pc,
- llvm::Value* object,
- int opt_flags) {
- bool ignore_null_check = ((opt_flags & MIR_IGNORE_NULL_CHECK) != 0);
- if (ignore_null_check) {
- llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc);
- if (lpad) {
- // There is at least one catch: create a "fake" conditional branch to
- // keep the exception edge to the catch block.
- landing_pad_phi_mapping_[lpad].push_back(
- std::make_pair(current_bb_->getUniquePredecessor(),
- irb_.GetInsertBlock()));
-
- llvm::BasicBlock* block_continue =
- CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- irb_.CreateCondBr(irb_.getFalse(), lpad, block_continue, kUnlikely);
-
- irb_.SetInsertPoint(block_continue);
- }
- } else {
- llvm::Value* equal_null = irb_.CreateICmpEQ(object, irb_.getJNull());
-
- llvm::BasicBlock* block_exception =
- CreateBasicBlockWithDexPC(dex_pc, "nullp");
-
- llvm::BasicBlock* block_continue =
- CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- irb_.CreateCondBr(equal_null, block_exception, block_continue, kUnlikely);
-
- irb_.SetInsertPoint(block_exception);
- EmitUpdateDexPC(dex_pc);
- irb_.CreateCall(irb_.GetRuntime(ThrowNullPointerException),
- irb_.getInt32(dex_pc));
- EmitBranchExceptionLandingPad(dex_pc);
-
- irb_.SetInsertPoint(block_continue);
- }
-}
-
-void
-GBCExpanderPass::EmitGuard_ArrayIndexOutOfBoundsException(uint32_t dex_pc,
- llvm::Value* array,
- llvm::Value* index,
- int opt_flags) {
- bool ignore_range_check = ((opt_flags & MIR_IGNORE_RANGE_CHECK) != 0);
- if (ignore_range_check) {
- llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc);
- if (lpad) {
- // There is at least one catch: create a "fake" conditional branch to
- // keep the exception edge to the catch block.
- landing_pad_phi_mapping_[lpad].push_back(
- std::make_pair(current_bb_->getUniquePredecessor(),
- irb_.GetInsertBlock()));
-
- llvm::BasicBlock* block_continue =
- CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- irb_.CreateCondBr(irb_.getFalse(), lpad, block_continue, kUnlikely);
-
- irb_.SetInsertPoint(block_continue);
- }
- } else {
- llvm::Value* array_len = EmitLoadArrayLength(array);
-
- llvm::Value* cmp = irb_.CreateICmpUGE(index, array_len);
-
- llvm::BasicBlock* block_exception =
- CreateBasicBlockWithDexPC(dex_pc, "overflow");
-
- llvm::BasicBlock* block_continue =
- CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- irb_.CreateCondBr(cmp, block_exception, block_continue, kUnlikely);
-
- irb_.SetInsertPoint(block_exception);
-
- EmitUpdateDexPC(dex_pc);
- irb_.CreateCall2(irb_.GetRuntime(ThrowIndexOutOfBounds), index, array_len);
- EmitBranchExceptionLandingPad(dex_pc);
-
- irb_.SetInsertPoint(block_continue);
- }
-}
-
-llvm::FunctionType* GBCExpanderPass::GetFunctionType(llvm::Type* ret_type, uint32_t method_idx,
- bool is_static) {
- // Get method signature
- art::DexFile::MethodId const& method_id =
- dex_compilation_unit_->GetDexFile()->GetMethodId(method_idx);
-
- uint32_t shorty_size;
- const char* shorty = dex_compilation_unit_->GetDexFile()->GetMethodShorty(method_id, &shorty_size);
- CHECK_GE(shorty_size, 1u);
-
- // Get argument type
- std::vector<llvm::Type*> args_type;
-
- args_type.push_back(irb_.getJObjectTy()); // method object pointer
-
- if (!is_static) {
- args_type.push_back(irb_.getJType('L')); // "this" object pointer
- }
-
- for (uint32_t i = 1; i < shorty_size; ++i) {
- char shorty_type = art::RemapShorty(shorty[i]);
- args_type.push_back(irb_.getJType(shorty_type));
- }
-
- return llvm::FunctionType::get(ret_type, args_type, false);
-}
-
-
-llvm::BasicBlock* GBCExpanderPass::
-CreateBasicBlockWithDexPC(uint32_t dex_pc, const char* postfix) {
- std::string name;
-
-#if !defined(NDEBUG)
- art::StringAppendF(&name, "B%04x.%s", dex_pc, postfix);
-#endif
-
- return llvm::BasicBlock::Create(context_, name, func_);
-}
-
-llvm::BasicBlock* GBCExpanderPass::GetBasicBlock(uint32_t dex_pc) {
- DCHECK(dex_pc < dex_compilation_unit_->GetCodeItem()->insns_size_in_code_units_);
- CHECK(basic_blocks_[dex_pc] != NULL);
- return basic_blocks_[dex_pc];
-}
-
-int32_t GBCExpanderPass::GetTryItemOffset(uint32_t dex_pc) {
- int32_t min = 0;
- int32_t max = dex_compilation_unit_->GetCodeItem()->tries_size_ - 1;
-
- while (min <= max) {
- int32_t mid = min + (max - min) / 2;
-
- const art::DexFile::TryItem* ti =
- art::DexFile::GetTryItems(*dex_compilation_unit_->GetCodeItem(), mid);
- uint32_t start = ti->start_addr_;
- uint32_t end = start + ti->insn_count_;
-
- if (dex_pc < start) {
- max = mid - 1;
- } else if (dex_pc >= end) {
- min = mid + 1;
- } else {
- return mid; // found
- }
- }
-
- return -1; // not found
-}
-
-llvm::BasicBlock* GBCExpanderPass::GetLandingPadBasicBlock(uint32_t dex_pc) {
- // Find the try item for this address in this method
- int32_t ti_offset = GetTryItemOffset(dex_pc);
-
- if (ti_offset == -1) {
- return NULL; // No landing pad is available for this address.
- }
-
- // Check for the existing landing pad basic block
- DCHECK_GT(basic_block_landing_pads_.size(), static_cast<size_t>(ti_offset));
- llvm::BasicBlock* block_lpad = basic_block_landing_pads_[ti_offset];
-
- if (block_lpad) {
- // We have generated landing pad for this try item already. Return the
- // same basic block.
- return block_lpad;
- }
-
- // Get try item from code item
- const art::DexFile::TryItem* ti = art::DexFile::GetTryItems(*dex_compilation_unit_->GetCodeItem(),
- ti_offset);
-
- std::string lpadname;
-
-#if !defined(NDEBUG)
- art::StringAppendF(&lpadname, "lpad%d_%04x_to_%04x", ti_offset, ti->start_addr_, ti->handler_off_);
-#endif
-
- // Create landing pad basic block
- block_lpad = llvm::BasicBlock::Create(context_, lpadname, func_);
-
- // Change IRBuilder insert point
- llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
- irb_.SetInsertPoint(block_lpad);
-
- // Find catch block with matching type
- llvm::Value* method_object_addr = EmitLoadMethodObjectAddr();
-
- llvm::Value* ti_offset_value = irb_.getInt32(ti_offset);
-
- llvm::Value* catch_handler_index_value =
- irb_.CreateCall2(irb_.GetRuntime(FindCatchBlock),
- method_object_addr, ti_offset_value);
-
- // Switch instruction (Go to unwind basic block by default)
- llvm::SwitchInst* sw =
- irb_.CreateSwitch(catch_handler_index_value, GetUnwindBasicBlock());
-
- // Cases with matched catch block
- art::CatchHandlerIterator iter(*dex_compilation_unit_->GetCodeItem(), ti->start_addr_);
-
- for (uint32_t c = 0; iter.HasNext(); iter.Next(), ++c) {
- sw->addCase(irb_.getInt32(c), GetBasicBlock(iter.GetHandlerAddress()));
- }
-
- // Restore the orignal insert point for IRBuilder
- irb_.restoreIP(irb_ip_original);
-
- // Cache this landing pad
- DCHECK_GT(basic_block_landing_pads_.size(), static_cast<size_t>(ti_offset));
- basic_block_landing_pads_[ti_offset] = block_lpad;
-
- return block_lpad;
-}
-
-llvm::BasicBlock* GBCExpanderPass::GetUnwindBasicBlock() {
- // Check the existing unwinding baisc block block
- if (basic_block_unwind_ != NULL) {
- return basic_block_unwind_;
- }
-
- // Create new basic block for unwinding
- basic_block_unwind_ =
- llvm::BasicBlock::Create(context_, "exception_unwind", func_);
-
- // Change IRBuilder insert point
- llvm::IRBuilderBase::InsertPoint irb_ip_original = irb_.saveIP();
- irb_.SetInsertPoint(basic_block_unwind_);
-
- // Pop the shadow frame
- Expand_PopShadowFrame();
-
- // Emit the code to return default value (zero) for the given return type.
- char ret_shorty = dex_compilation_unit_->GetShorty()[0];
- ret_shorty = art::RemapShorty(ret_shorty);
- if (ret_shorty == 'V') {
- irb_.CreateRetVoid();
- } else {
- irb_.CreateRet(irb_.getJZero(ret_shorty));
- }
-
- // Restore the orignal insert point for IRBuilder
- irb_.restoreIP(irb_ip_original);
-
- return basic_block_unwind_;
-}
-
-void GBCExpanderPass::EmitBranchExceptionLandingPad(uint32_t dex_pc) {
- if (llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc)) {
- landing_pad_phi_mapping_[lpad].push_back(std::make_pair(current_bb_->getUniquePredecessor(),
- irb_.GetInsertBlock()));
- irb_.CreateBr(lpad);
- } else {
- irb_.CreateBr(GetUnwindBasicBlock());
- }
-}
-
-void GBCExpanderPass::EmitGuard_ExceptionLandingPad(uint32_t dex_pc) {
- llvm::Value* exception_pending = irb_.Runtime().EmitIsExceptionPending();
-
- llvm::BasicBlock* block_cont = CreateBasicBlockWithDexPC(dex_pc, "cont");
-
- if (llvm::BasicBlock* lpad = GetLandingPadBasicBlock(dex_pc)) {
- landing_pad_phi_mapping_[lpad].push_back(std::make_pair(current_bb_->getUniquePredecessor(),
- irb_.GetInsertBlock()));
- irb_.CreateCondBr(exception_pending, lpad, block_cont, kUnlikely);
- } else {
- irb_.CreateCondBr(exception_pending, GetUnwindBasicBlock(), block_cont, kUnlikely);
- }
-
- irb_.SetInsertPoint(block_cont);
-}
-
-llvm::Value*
-GBCExpanderPass::ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
- llvm::CallInst& call_inst) {
- switch (intr_id) {
- //==- Thread -----------------------------------------------------------==//
- case IntrinsicHelper::GetCurrentThread: {
- return irb_.Runtime().EmitGetCurrentThread();
- }
- case IntrinsicHelper::CheckSuspend: {
- Expand_TestSuspend(call_inst);
- return NULL;
- }
- case IntrinsicHelper::TestSuspend: {
- Expand_TestSuspend(call_inst);
- return NULL;
- }
- case IntrinsicHelper::MarkGCCard: {
- Expand_MarkGCCard(call_inst);
- return NULL;
- }
-
- //==- Exception --------------------------------------------------------==//
- case IntrinsicHelper::ThrowException: {
- return ExpandToRuntime(ThrowException, call_inst);
- }
- case IntrinsicHelper::HLThrowException: {
- uint32_t dex_pc = LV2UInt(call_inst.getMetadata("DexOff")->getOperand(0));
-
- EmitUpdateDexPC(dex_pc);
-
- irb_.CreateCall(irb_.GetRuntime(ThrowException),
- call_inst.getArgOperand(0));
-
- EmitGuard_ExceptionLandingPad(dex_pc);
- return NULL;
- }
- case IntrinsicHelper::GetException: {
- return irb_.Runtime().EmitGetAndClearException();
- }
- case IntrinsicHelper::IsExceptionPending: {
- return irb_.Runtime().EmitIsExceptionPending();
- }
- case IntrinsicHelper::FindCatchBlock: {
- return ExpandToRuntime(FindCatchBlock, call_inst);
- }
- case IntrinsicHelper::ThrowDivZeroException: {
- return ExpandToRuntime(ThrowDivZeroException, call_inst);
- }
- case IntrinsicHelper::ThrowNullPointerException: {
- return ExpandToRuntime(ThrowNullPointerException, call_inst);
- }
- case IntrinsicHelper::ThrowIndexOutOfBounds: {
- return ExpandToRuntime(ThrowIndexOutOfBounds, call_inst);
- }
-
- //==- Const String -----------------------------------------------------==//
- case IntrinsicHelper::ConstString: {
- return Expand_ConstString(call_inst);
- }
- case IntrinsicHelper::LoadStringFromDexCache: {
- return Expand_LoadStringFromDexCache(call_inst.getArgOperand(0));
- }
- case IntrinsicHelper::ResolveString: {
- return ExpandToRuntime(ResolveString, call_inst);
- }
-
- //==- Const Class ------------------------------------------------------==//
- case IntrinsicHelper::ConstClass: {
- return Expand_ConstClass(call_inst);
- }
- case IntrinsicHelper::InitializeTypeAndVerifyAccess: {
- return ExpandToRuntime(InitializeTypeAndVerifyAccess, call_inst);
- }
- case IntrinsicHelper::LoadTypeFromDexCache: {
- return Expand_LoadTypeFromDexCache(call_inst.getArgOperand(0));
- }
- case IntrinsicHelper::InitializeType: {
- return ExpandToRuntime(InitializeType, call_inst);
- }
-
- //==- Lock -------------------------------------------------------------==//
- case IntrinsicHelper::LockObject: {
- Expand_LockObject(call_inst.getArgOperand(0));
- return NULL;
- }
- case IntrinsicHelper::UnlockObject: {
- Expand_UnlockObject(call_inst.getArgOperand(0));
- return NULL;
- }
-
- //==- Cast -------------------------------------------------------------==//
- case IntrinsicHelper::CheckCast: {
- return ExpandToRuntime(CheckCast, call_inst);
- }
- case IntrinsicHelper::HLCheckCast: {
- Expand_HLCheckCast(call_inst);
- return NULL;
- }
- case IntrinsicHelper::IsAssignable: {
- return ExpandToRuntime(IsAssignable, call_inst);
- }
-
- //==- Alloc ------------------------------------------------------------==//
- case IntrinsicHelper::AllocObject: {
- return ExpandToRuntime(AllocObject, call_inst);
- }
- case IntrinsicHelper::AllocObjectWithAccessCheck: {
- return ExpandToRuntime(AllocObjectWithAccessCheck, call_inst);
- }
-
- //==- Instance ---------------------------------------------------------==//
- case IntrinsicHelper::NewInstance: {
- return Expand_NewInstance(call_inst);
- }
- case IntrinsicHelper::InstanceOf: {
- return Expand_InstanceOf(call_inst);
- }
-
- //==- Array ------------------------------------------------------------==//
- case IntrinsicHelper::NewArray: {
- return Expand_NewArray(call_inst);
- }
- case IntrinsicHelper::OptArrayLength: {
- return Expand_OptArrayLength(call_inst);
- }
- case IntrinsicHelper::ArrayLength: {
- return EmitLoadArrayLength(call_inst.getArgOperand(0));
- }
- case IntrinsicHelper::AllocArray: {
- return ExpandToRuntime(AllocArray, call_inst);
- }
- case IntrinsicHelper::AllocArrayWithAccessCheck: {
- return ExpandToRuntime(AllocArrayWithAccessCheck,
- call_inst);
- }
- case IntrinsicHelper::CheckAndAllocArray: {
- return ExpandToRuntime(CheckAndAllocArray, call_inst);
- }
- case IntrinsicHelper::CheckAndAllocArrayWithAccessCheck: {
- return ExpandToRuntime(CheckAndAllocArrayWithAccessCheck,
- call_inst);
- }
- case IntrinsicHelper::ArrayGet: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kInt);
- }
- case IntrinsicHelper::ArrayGetWide: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kLong);
- }
- case IntrinsicHelper::ArrayGetObject: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kObject);
- }
- case IntrinsicHelper::ArrayGetBoolean: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kBoolean);
- }
- case IntrinsicHelper::ArrayGetByte: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kByte);
- }
- case IntrinsicHelper::ArrayGetChar: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kChar);
- }
- case IntrinsicHelper::ArrayGetShort: {
- return Expand_ArrayGet(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kShort);
- }
- case IntrinsicHelper::ArrayPut: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kInt);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutWide: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kLong);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutObject: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kObject);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutBoolean: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kBoolean);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutByte: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kByte);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutChar: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kChar);
- return NULL;
- }
- case IntrinsicHelper::ArrayPutShort: {
- Expand_ArrayPut(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kShort);
- return NULL;
- }
- case IntrinsicHelper::CheckPutArrayElement: {
- return ExpandToRuntime(CheckPutArrayElement, call_inst);
- }
- case IntrinsicHelper::FilledNewArray: {
- Expand_FilledNewArray(call_inst);
- return NULL;
- }
- case IntrinsicHelper::FillArrayData: {
- return ExpandToRuntime(FillArrayData, call_inst);
- }
- case IntrinsicHelper::HLFillArrayData: {
- Expand_HLFillArrayData(call_inst);
- return NULL;
- }
- case IntrinsicHelper::HLFilledNewArray: {
- return Expand_HLFilledNewArray(call_inst);
- }
-
- //==- Instance Field ---------------------------------------------------==//
- case IntrinsicHelper::InstanceFieldGet:
- case IntrinsicHelper::InstanceFieldGetBoolean:
- case IntrinsicHelper::InstanceFieldGetByte:
- case IntrinsicHelper::InstanceFieldGetChar:
- case IntrinsicHelper::InstanceFieldGetShort: {
- return ExpandToRuntime(Get32Instance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldGetWide: {
- return ExpandToRuntime(Get64Instance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldGetObject: {
- return ExpandToRuntime(GetObjectInstance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldGetFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kInt);
- }
- case IntrinsicHelper::InstanceFieldGetWideFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kLong);
- }
- case IntrinsicHelper::InstanceFieldGetObjectFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kObject);
- }
- case IntrinsicHelper::InstanceFieldGetBooleanFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kBoolean);
- }
- case IntrinsicHelper::InstanceFieldGetByteFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kByte);
- }
- case IntrinsicHelper::InstanceFieldGetCharFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kChar);
- }
- case IntrinsicHelper::InstanceFieldGetShortFast: {
- return Expand_IGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kShort);
- }
- case IntrinsicHelper::InstanceFieldPut:
- case IntrinsicHelper::InstanceFieldPutBoolean:
- case IntrinsicHelper::InstanceFieldPutByte:
- case IntrinsicHelper::InstanceFieldPutChar:
- case IntrinsicHelper::InstanceFieldPutShort: {
- return ExpandToRuntime(Set32Instance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldPutWide: {
- return ExpandToRuntime(Set64Instance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldPutObject: {
- return ExpandToRuntime(SetObjectInstance, call_inst);
- }
- case IntrinsicHelper::InstanceFieldPutFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kInt);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutWideFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kLong);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutObjectFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kObject);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutBooleanFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kBoolean);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutByteFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kByte);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutCharFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kChar);
- return NULL;
- }
- case IntrinsicHelper::InstanceFieldPutShortFast: {
- Expand_IPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kShort);
- return NULL;
- }
-
- //==- Static Field -----------------------------------------------------==//
- case IntrinsicHelper::StaticFieldGet:
- case IntrinsicHelper::StaticFieldGetBoolean:
- case IntrinsicHelper::StaticFieldGetByte:
- case IntrinsicHelper::StaticFieldGetChar:
- case IntrinsicHelper::StaticFieldGetShort: {
- return ExpandToRuntime(Get32Static, call_inst);
- }
- case IntrinsicHelper::StaticFieldGetWide: {
- return ExpandToRuntime(Get64Static, call_inst);
- }
- case IntrinsicHelper::StaticFieldGetObject: {
- return ExpandToRuntime(GetObjectStatic, call_inst);
- }
- case IntrinsicHelper::StaticFieldGetFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kInt);
- }
- case IntrinsicHelper::StaticFieldGetWideFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kLong);
- }
- case IntrinsicHelper::StaticFieldGetObjectFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kObject);
- }
- case IntrinsicHelper::StaticFieldGetBooleanFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kBoolean);
- }
- case IntrinsicHelper::StaticFieldGetByteFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kByte);
- }
- case IntrinsicHelper::StaticFieldGetCharFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kChar);
- }
- case IntrinsicHelper::StaticFieldGetShortFast: {
- return Expand_SGetFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- kShort);
- }
- case IntrinsicHelper::StaticFieldPut:
- case IntrinsicHelper::StaticFieldPutBoolean:
- case IntrinsicHelper::StaticFieldPutByte:
- case IntrinsicHelper::StaticFieldPutChar:
- case IntrinsicHelper::StaticFieldPutShort: {
- return ExpandToRuntime(Set32Static, call_inst);
- }
- case IntrinsicHelper::StaticFieldPutWide: {
- return ExpandToRuntime(Set64Static, call_inst);
- }
- case IntrinsicHelper::StaticFieldPutObject: {
- return ExpandToRuntime(SetObjectStatic, call_inst);
- }
- case IntrinsicHelper::StaticFieldPutFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kInt);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutWideFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kLong);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutObjectFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kObject);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutBooleanFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kBoolean);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutByteFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kByte);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutCharFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kChar);
- return NULL;
- }
- case IntrinsicHelper::StaticFieldPutShortFast: {
- Expand_SPutFast(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- call_inst.getArgOperand(2),
- call_inst.getArgOperand(3),
- kShort);
- return NULL;
- }
- case IntrinsicHelper::LoadDeclaringClassSSB: {
- return Expand_LoadDeclaringClassSSB(call_inst.getArgOperand(0));
- }
- case IntrinsicHelper::InitializeAndLoadClassSSB: {
- return ExpandToRuntime(InitializeStaticStorage, call_inst);
- }
-
- //==- High-level Array -------------------------------------------------==//
- case IntrinsicHelper::HLArrayGet: {
- return Expand_HLArrayGet(call_inst, kInt);
- }
- case IntrinsicHelper::HLArrayGetBoolean: {
- return Expand_HLArrayGet(call_inst, kBoolean);
- }
- case IntrinsicHelper::HLArrayGetByte: {
- return Expand_HLArrayGet(call_inst, kByte);
- }
- case IntrinsicHelper::HLArrayGetChar: {
- return Expand_HLArrayGet(call_inst, kChar);
- }
- case IntrinsicHelper::HLArrayGetShort: {
- return Expand_HLArrayGet(call_inst, kShort);
- }
- case IntrinsicHelper::HLArrayGetFloat: {
- return Expand_HLArrayGet(call_inst, kFloat);
- }
- case IntrinsicHelper::HLArrayGetWide: {
- return Expand_HLArrayGet(call_inst, kLong);
- }
- case IntrinsicHelper::HLArrayGetDouble: {
- return Expand_HLArrayGet(call_inst, kDouble);
- }
- case IntrinsicHelper::HLArrayGetObject: {
- return Expand_HLArrayGet(call_inst, kObject);
- }
- case IntrinsicHelper::HLArrayPut: {
- Expand_HLArrayPut(call_inst, kInt);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutBoolean: {
- Expand_HLArrayPut(call_inst, kBoolean);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutByte: {
- Expand_HLArrayPut(call_inst, kByte);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutChar: {
- Expand_HLArrayPut(call_inst, kChar);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutShort: {
- Expand_HLArrayPut(call_inst, kShort);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutFloat: {
- Expand_HLArrayPut(call_inst, kFloat);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutWide: {
- Expand_HLArrayPut(call_inst, kLong);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutDouble: {
- Expand_HLArrayPut(call_inst, kDouble);
- return NULL;
- }
- case IntrinsicHelper::HLArrayPutObject: {
- Expand_HLArrayPut(call_inst, kObject);
- return NULL;
- }
-
- //==- High-level Instance ----------------------------------------------==//
- case IntrinsicHelper::HLIGet: {
- return Expand_HLIGet(call_inst, kInt);
- }
- case IntrinsicHelper::HLIGetBoolean: {
- return Expand_HLIGet(call_inst, kBoolean);
- }
- case IntrinsicHelper::HLIGetByte: {
- return Expand_HLIGet(call_inst, kByte);
- }
- case IntrinsicHelper::HLIGetChar: {
- return Expand_HLIGet(call_inst, kChar);
- }
- case IntrinsicHelper::HLIGetShort: {
- return Expand_HLIGet(call_inst, kShort);
- }
- case IntrinsicHelper::HLIGetFloat: {
- return Expand_HLIGet(call_inst, kFloat);
- }
- case IntrinsicHelper::HLIGetWide: {
- return Expand_HLIGet(call_inst, kLong);
- }
- case IntrinsicHelper::HLIGetDouble: {
- return Expand_HLIGet(call_inst, kDouble);
- }
- case IntrinsicHelper::HLIGetObject: {
- return Expand_HLIGet(call_inst, kObject);
- }
- case IntrinsicHelper::HLIPut: {
- Expand_HLIPut(call_inst, kInt);
- return NULL;
- }
- case IntrinsicHelper::HLIPutBoolean: {
- Expand_HLIPut(call_inst, kBoolean);
- return NULL;
- }
- case IntrinsicHelper::HLIPutByte: {
- Expand_HLIPut(call_inst, kByte);
- return NULL;
- }
- case IntrinsicHelper::HLIPutChar: {
- Expand_HLIPut(call_inst, kChar);
- return NULL;
- }
- case IntrinsicHelper::HLIPutShort: {
- Expand_HLIPut(call_inst, kShort);
- return NULL;
- }
- case IntrinsicHelper::HLIPutFloat: {
- Expand_HLIPut(call_inst, kFloat);
- return NULL;
- }
- case IntrinsicHelper::HLIPutWide: {
- Expand_HLIPut(call_inst, kLong);
- return NULL;
- }
- case IntrinsicHelper::HLIPutDouble: {
- Expand_HLIPut(call_inst, kDouble);
- return NULL;
- }
- case IntrinsicHelper::HLIPutObject: {
- Expand_HLIPut(call_inst, kObject);
- return NULL;
- }
-
- //==- High-level Invoke ------------------------------------------------==//
- case IntrinsicHelper::HLInvokeVoid:
- case IntrinsicHelper::HLInvokeObj:
- case IntrinsicHelper::HLInvokeInt:
- case IntrinsicHelper::HLInvokeFloat:
- case IntrinsicHelper::HLInvokeLong:
- case IntrinsicHelper::HLInvokeDouble: {
- return Expand_HLInvoke(call_inst);
- }
-
- //==- Invoke -----------------------------------------------------------==//
- case IntrinsicHelper::FindStaticMethodWithAccessCheck: {
- return ExpandToRuntime(FindStaticMethodWithAccessCheck, call_inst);
- }
- case IntrinsicHelper::FindDirectMethodWithAccessCheck: {
- return ExpandToRuntime(FindDirectMethodWithAccessCheck, call_inst);
- }
- case IntrinsicHelper::FindVirtualMethodWithAccessCheck: {
- return ExpandToRuntime(FindVirtualMethodWithAccessCheck, call_inst);
- }
- case IntrinsicHelper::FindSuperMethodWithAccessCheck: {
- return ExpandToRuntime(FindSuperMethodWithAccessCheck, call_inst);
- }
- case IntrinsicHelper::FindInterfaceMethodWithAccessCheck: {
- return ExpandToRuntime(FindInterfaceMethodWithAccessCheck, call_inst);
- }
- case IntrinsicHelper::GetSDCalleeMethodObjAddrFast: {
- return Expand_GetSDCalleeMethodObjAddrFast(call_inst.getArgOperand(0));
- }
- case IntrinsicHelper::GetVirtualCalleeMethodObjAddrFast: {
- return Expand_GetVirtualCalleeMethodObjAddrFast(
- call_inst.getArgOperand(0), call_inst.getArgOperand(1));
- }
- case IntrinsicHelper::GetInterfaceCalleeMethodObjAddrFast: {
- return ExpandToRuntime(FindInterfaceMethod, call_inst);
- }
- case IntrinsicHelper::InvokeRetVoid:
- case IntrinsicHelper::InvokeRetBoolean:
- case IntrinsicHelper::InvokeRetByte:
- case IntrinsicHelper::InvokeRetChar:
- case IntrinsicHelper::InvokeRetShort:
- case IntrinsicHelper::InvokeRetInt:
- case IntrinsicHelper::InvokeRetLong:
- case IntrinsicHelper::InvokeRetFloat:
- case IntrinsicHelper::InvokeRetDouble:
- case IntrinsicHelper::InvokeRetObject: {
- return Expand_Invoke(call_inst);
- }
-
- //==- Math -------------------------------------------------------------==//
- case IntrinsicHelper::DivInt: {
- return Expand_DivRem(call_inst, /* is_div */true, kInt);
- }
- case IntrinsicHelper::RemInt: {
- return Expand_DivRem(call_inst, /* is_div */false, kInt);
- }
- case IntrinsicHelper::DivLong: {
- return Expand_DivRem(call_inst, /* is_div */true, kLong);
- }
- case IntrinsicHelper::RemLong: {
- return Expand_DivRem(call_inst, /* is_div */false, kLong);
- }
- case IntrinsicHelper::D2L: {
- return ExpandToRuntime(art_d2l, call_inst);
- }
- case IntrinsicHelper::D2I: {
- return ExpandToRuntime(art_d2i, call_inst);
- }
- case IntrinsicHelper::F2L: {
- return ExpandToRuntime(art_f2l, call_inst);
- }
- case IntrinsicHelper::F2I: {
- return ExpandToRuntime(art_f2i, call_inst);
- }
-
- //==- High-level Static ------------------------------------------------==//
- case IntrinsicHelper::HLSget: {
- return Expand_HLSget(call_inst, kInt);
- }
- case IntrinsicHelper::HLSgetBoolean: {
- return Expand_HLSget(call_inst, kBoolean);
- }
- case IntrinsicHelper::HLSgetByte: {
- return Expand_HLSget(call_inst, kByte);
- }
- case IntrinsicHelper::HLSgetChar: {
- return Expand_HLSget(call_inst, kChar);
- }
- case IntrinsicHelper::HLSgetShort: {
- return Expand_HLSget(call_inst, kShort);
- }
- case IntrinsicHelper::HLSgetFloat: {
- return Expand_HLSget(call_inst, kFloat);
- }
- case IntrinsicHelper::HLSgetWide: {
- return Expand_HLSget(call_inst, kLong);
- }
- case IntrinsicHelper::HLSgetDouble: {
- return Expand_HLSget(call_inst, kDouble);
- }
- case IntrinsicHelper::HLSgetObject: {
- return Expand_HLSget(call_inst, kObject);
- }
- case IntrinsicHelper::HLSput: {
- Expand_HLSput(call_inst, kInt);
- return NULL;
- }
- case IntrinsicHelper::HLSputBoolean: {
- Expand_HLSput(call_inst, kBoolean);
- return NULL;
- }
- case IntrinsicHelper::HLSputByte: {
- Expand_HLSput(call_inst, kByte);
- return NULL;
- }
- case IntrinsicHelper::HLSputChar: {
- Expand_HLSput(call_inst, kChar);
- return NULL;
- }
- case IntrinsicHelper::HLSputShort: {
- Expand_HLSput(call_inst, kShort);
- return NULL;
- }
- case IntrinsicHelper::HLSputFloat: {
- Expand_HLSput(call_inst, kFloat);
- return NULL;
- }
- case IntrinsicHelper::HLSputWide: {
- Expand_HLSput(call_inst, kLong);
- return NULL;
- }
- case IntrinsicHelper::HLSputDouble: {
- Expand_HLSput(call_inst, kDouble);
- return NULL;
- }
- case IntrinsicHelper::HLSputObject: {
- Expand_HLSput(call_inst, kObject);
- return NULL;
- }
-
- //==- High-level Monitor -----------------------------------------------==//
- case IntrinsicHelper::MonitorEnter: {
- Expand_MonitorEnter(call_inst);
- return NULL;
- }
- case IntrinsicHelper::MonitorExit: {
- Expand_MonitorExit(call_inst);
- return NULL;
- }
-
- //==- Shadow Frame -----------------------------------------------------==//
- case IntrinsicHelper::AllocaShadowFrame: {
- Expand_AllocaShadowFrame(call_inst.getArgOperand(0));
- return NULL;
- }
- case IntrinsicHelper::SetVReg: {
- Expand_SetVReg(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1));
- return NULL;
- }
- case IntrinsicHelper::PopShadowFrame: {
- Expand_PopShadowFrame();
- return NULL;
- }
- case IntrinsicHelper::UpdateDexPC: {
- Expand_UpdateDexPC(call_inst.getArgOperand(0));
- return NULL;
- }
-
- //==- Comparison -------------------------------------------------------==//
- case IntrinsicHelper::CmplFloat:
- case IntrinsicHelper::CmplDouble: {
- return Expand_FPCompare(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- false);
- }
- case IntrinsicHelper::CmpgFloat:
- case IntrinsicHelper::CmpgDouble: {
- return Expand_FPCompare(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- true);
- }
- case IntrinsicHelper::CmpLong: {
- return Expand_LongCompare(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1));
- }
-
- //==- Const ------------------------------------------------------------==//
- case IntrinsicHelper::ConstInt:
- case IntrinsicHelper::ConstLong: {
- return call_inst.getArgOperand(0);
- }
- case IntrinsicHelper::ConstFloat: {
- return irb_.CreateBitCast(call_inst.getArgOperand(0),
- irb_.getJFloatTy());
- }
- case IntrinsicHelper::ConstDouble: {
- return irb_.CreateBitCast(call_inst.getArgOperand(0),
- irb_.getJDoubleTy());
- }
- case IntrinsicHelper::ConstObj: {
- CHECK_EQ(LV2UInt(call_inst.getArgOperand(0)), 0U);
- return irb_.getJNull();
- }
-
- //==- Method Info ------------------------------------------------------==//
- case IntrinsicHelper::MethodInfo: {
- // Nothing to be done, because MethodInfo carries optional hints that are
- // not needed by the portable path.
- return NULL;
- }
-
- //==- Copy -------------------------------------------------------------==//
- case IntrinsicHelper::CopyInt:
- case IntrinsicHelper::CopyFloat:
- case IntrinsicHelper::CopyLong:
- case IntrinsicHelper::CopyDouble:
- case IntrinsicHelper::CopyObj: {
- return call_inst.getArgOperand(0);
- }
-
- //==- Shift ------------------------------------------------------------==//
- case IntrinsicHelper::SHLLong: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerSHL, kLong);
- }
- case IntrinsicHelper::SHRLong: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerSHR, kLong);
- }
- case IntrinsicHelper::USHRLong: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerUSHR, kLong);
- }
- case IntrinsicHelper::SHLInt: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerSHL, kInt);
- }
- case IntrinsicHelper::SHRInt: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerSHR, kInt);
- }
- case IntrinsicHelper::USHRInt: {
- return Expand_IntegerShift(call_inst.getArgOperand(0),
- call_inst.getArgOperand(1),
- kIntegerUSHR, kInt);
- }
-
- //==- Conversion -------------------------------------------------------==//
- case IntrinsicHelper::IntToChar: {
- return irb_.CreateZExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJCharTy()),
- irb_.getJIntTy());
- }
- case IntrinsicHelper::IntToShort: {
- return irb_.CreateSExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJShortTy()),
- irb_.getJIntTy());
- }
- case IntrinsicHelper::IntToByte: {
- return irb_.CreateSExt(irb_.CreateTrunc(call_inst.getArgOperand(0), irb_.getJByteTy()),
- irb_.getJIntTy());
- }
-
- //==- Exception --------------------------------------------------------==//
- case IntrinsicHelper::CatchTargets: {
- UpdatePhiInstruction(current_bb_, irb_.GetInsertBlock());
- llvm::SwitchInst* si = llvm::dyn_cast<llvm::SwitchInst>(call_inst.getNextNode());
- CHECK(si != NULL);
- irb_.CreateBr(si->getDefaultDest());
- si->eraseFromParent();
- return call_inst.getArgOperand(0);
- }
-
- //==- Constructor barrier-----------------------------------------------==//
- case IntrinsicHelper::ConstructorBarrier: {
- irb_.CreateMemoryBarrier(art::kStoreStore);
- return NULL;
- }
-
- //==- Unknown Cases ----------------------------------------------------==//
- case IntrinsicHelper::MaxIntrinsicId:
- case IntrinsicHelper::UnknownId:
- // default:
- // NOTE: "default" is intentionally commented so that C/C++ compiler will
- // give some warning on unmatched cases.
- // NOTE: We should not implement these cases.
- break;
- }
- UNIMPLEMENTED(FATAL) << "Unexpected GBC intrinsic: " << static_cast<int>(intr_id);
- return NULL;
-} // NOLINT(readability/fn_size)
-
-} // anonymous namespace
-
-namespace art {
-namespace llvm {
-
-::llvm::FunctionPass*
-CreateGBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
- CompilerDriver* driver, const DexCompilationUnit* dex_compilation_unit) {
- return new GBCExpanderPass(intrinsic_helper, irb, driver, dex_compilation_unit);
-}
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/generated/art_module.cc b/compiler/llvm/generated/art_module.cc
deleted file mode 100644
index f3c5a5a6c6..0000000000
--- a/compiler/llvm/generated/art_module.cc
+++ /dev/null
@@ -1,1096 +0,0 @@
-// Generated with ./gen_art_module_cc.sh
-
-
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
-// TODO: Remove this pragma after llc can generate makeLLVMModuleContents()
-// with smaller frame size.
-
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/Module.h>
-#include <llvm/IR/Type.h>
-
-#include <vector>
-
-using namespace llvm;
-
-namespace art {
-namespace llvm {
-
-
-// Generated by llvm2cpp - DO NOT MODIFY!
-
-
-Module* makeLLVMModuleContents(Module *mod) {
-
-mod->setModuleIdentifier("art_module.ll");
-
-// Type Definitions
-std::vector<Type*>FuncTy_0_args;
-StructType *StructTy_JavaObject = mod->getTypeByName("JavaObject");
-if (!StructTy_JavaObject) {
-StructTy_JavaObject = StructType::create(mod->getContext(), "JavaObject");
-}
-std::vector<Type*>StructTy_JavaObject_fields;
-if (StructTy_JavaObject->isOpaque()) {
-StructTy_JavaObject->setBody(StructTy_JavaObject_fields, /*isPacked=*/false);
-}
-
-PointerType* PointerTy_1 = PointerType::get(StructTy_JavaObject, 0);
-
-FuncTy_0_args.push_back(PointerTy_1);
-StructType *StructTy_ShadowFrame = mod->getTypeByName("ShadowFrame");
-if (!StructTy_ShadowFrame) {
-StructTy_ShadowFrame = StructType::create(mod->getContext(), "ShadowFrame");
-}
-std::vector<Type*>StructTy_ShadowFrame_fields;
-StructTy_ShadowFrame_fields.push_back(IntegerType::get(mod->getContext(), 32));
-PointerType* PointerTy_2 = PointerType::get(StructTy_ShadowFrame, 0);
-
-StructTy_ShadowFrame_fields.push_back(PointerTy_2);
-StructTy_ShadowFrame_fields.push_back(PointerTy_1);
-StructTy_ShadowFrame_fields.push_back(IntegerType::get(mod->getContext(), 32));
-if (StructTy_ShadowFrame->isOpaque()) {
-StructTy_ShadowFrame->setBody(StructTy_ShadowFrame_fields, /*isPacked=*/false);
-}
-
-
-FuncTy_0_args.push_back(PointerTy_2);
-FunctionType* FuncTy_0 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_0_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_3_args;
-FunctionType* FuncTy_3 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_3_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_4_args;
-FuncTy_4_args.push_back(PointerTy_1);
-FunctionType* FuncTy_4 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_4_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_5_args;
-FuncTy_5_args.push_back(PointerTy_1);
-FuncTy_5_args.push_back(PointerTy_1);
-FunctionType* FuncTy_5 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_5_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_6_args;
-FuncTy_6_args.push_back(PointerTy_1);
-FunctionType* FuncTy_6 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_6_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_7_args;
-FuncTy_7_args.push_back(PointerTy_1);
-FuncTy_7_args.push_back(PointerTy_2);
-FuncTy_7_args.push_back(PointerTy_1);
-FuncTy_7_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_7 = FunctionType::get(
- /*Result=*/PointerTy_2,
- /*Params=*/FuncTy_7_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_8_args;
-FuncTy_8_args.push_back(PointerTy_2);
-FunctionType* FuncTy_8 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_8_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_9_args;
-FunctionType* FuncTy_9 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_9_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_10_args;
-FuncTy_10_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_10_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_10 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_10_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_11_args;
-FuncTy_11_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_11 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_11_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_12_args;
-FuncTy_12_args.push_back(PointerTy_1);
-FuncTy_12_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_12 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_12_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_13_args;
-FuncTy_13_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_13_args.push_back(PointerTy_1);
-FuncTy_13_args.push_back(PointerTy_1);
-FunctionType* FuncTy_13 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_13_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_14_args;
-FuncTy_14_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_14_args.push_back(PointerTy_1);
-FuncTy_14_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_14_args.push_back(PointerTy_1);
-FunctionType* FuncTy_14 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_14_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_15_args;
-FuncTy_15_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_15_args.push_back(PointerTy_1);
-FunctionType* FuncTy_15 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_15_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_16_args;
-FuncTy_16_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_16_args.push_back(PointerTy_1);
-FuncTy_16_args.push_back(PointerTy_1);
-FuncTy_16_args.push_back(PointerTy_1);
-FunctionType* FuncTy_16 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_16_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_17_args;
-FuncTy_17_args.push_back(PointerTy_1);
-FuncTy_17_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_17 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_17_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_18_args;
-FuncTy_18_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_18_args.push_back(PointerTy_1);
-FuncTy_18_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_18 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_18_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_19_args;
-FuncTy_19_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_19_args.push_back(PointerTy_1);
-FuncTy_19_args.push_back(IntegerType::get(mod->getContext(), 64));
-FunctionType* FuncTy_19 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_19_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_20_args;
-FuncTy_20_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_20_args.push_back(PointerTy_1);
-FuncTy_20_args.push_back(PointerTy_1);
-FunctionType* FuncTy_20 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_20_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_21_args;
-FuncTy_21_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_21_args.push_back(PointerTy_1);
-FunctionType* FuncTy_21 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_21_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_22_args;
-FuncTy_22_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_22_args.push_back(PointerTy_1);
-FunctionType* FuncTy_22 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 64),
- /*Params=*/FuncTy_22_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_23_args;
-FuncTy_23_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_23_args.push_back(PointerTy_1);
-FunctionType* FuncTy_23 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_23_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_24_args;
-FuncTy_24_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_24_args.push_back(PointerTy_1);
-FuncTy_24_args.push_back(PointerTy_1);
-FuncTy_24_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_24 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_24_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_25_args;
-FuncTy_25_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_25_args.push_back(PointerTy_1);
-FuncTy_25_args.push_back(PointerTy_1);
-FuncTy_25_args.push_back(IntegerType::get(mod->getContext(), 64));
-FunctionType* FuncTy_25 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_25_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_26_args;
-FuncTy_26_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_26_args.push_back(PointerTy_1);
-FuncTy_26_args.push_back(PointerTy_1);
-FuncTy_26_args.push_back(PointerTy_1);
-FunctionType* FuncTy_26 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_26_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_27_args;
-FuncTy_27_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_27_args.push_back(PointerTy_1);
-FuncTy_27_args.push_back(PointerTy_1);
-FunctionType* FuncTy_27 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 64),
- /*Params=*/FuncTy_27_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_28_args;
-FuncTy_28_args.push_back(PointerTy_1);
-FuncTy_28_args.push_back(PointerTy_1);
-FunctionType* FuncTy_28 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_28_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_29_args;
-FuncTy_29_args.push_back(PointerTy_1);
-FuncTy_29_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_29_args.push_back(PointerTy_1);
-FuncTy_29_args.push_back(IntegerType::get(mod->getContext(), 32));
-FunctionType* FuncTy_29 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_29_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_30_args;
-FuncTy_30_args.push_back(PointerTy_1);
-FuncTy_30_args.push_back(PointerTy_1);
-FunctionType* FuncTy_30 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_30_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_31_args;
-FuncTy_31_args.push_back(Type::getDoubleTy(mod->getContext()));
-FunctionType* FuncTy_31 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 64),
- /*Params=*/FuncTy_31_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_32_args;
-FuncTy_32_args.push_back(Type::getDoubleTy(mod->getContext()));
-FunctionType* FuncTy_32 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_32_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_33_args;
-FuncTy_33_args.push_back(Type::getFloatTy(mod->getContext()));
-FunctionType* FuncTy_33 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 64),
- /*Params=*/FuncTy_33_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_34_args;
-FuncTy_34_args.push_back(Type::getFloatTy(mod->getContext()));
-FunctionType* FuncTy_34 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_34_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_35_args;
-FuncTy_35_args.push_back(PointerTy_1);
-FunctionType* FuncTy_35 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 32),
- /*Params=*/FuncTy_35_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_36_args;
-FuncTy_36_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_36_args.push_back(PointerTy_1);
-FuncTy_36_args.push_back(PointerTy_1);
-FunctionType* FuncTy_36 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_36_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_37_args;
-FuncTy_37_args.push_back(PointerTy_1);
-FuncTy_37_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_37_args.push_back(PointerTy_1);
-FunctionType* FuncTy_37 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_37_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_38_args;
-FuncTy_38_args.push_back(PointerTy_1);
-FuncTy_38_args.push_back(IntegerType::get(mod->getContext(), 32));
-FuncTy_38_args.push_back(PointerTy_1);
-FuncTy_38_args.push_back(PointerTy_1);
-FunctionType* FuncTy_38 = FunctionType::get(
- /*Result=*/PointerTy_1,
- /*Params=*/FuncTy_38_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_39_args;
-FunctionType* FuncTy_39 = FunctionType::get(
- /*Result=*/IntegerType::get(mod->getContext(), 1),
- /*Params=*/FuncTy_39_args,
- /*isVarArg=*/false);
-
-std::vector<Type*>FuncTy_40_args;
-FuncTy_40_args.push_back(PointerTy_1);
-FunctionType* FuncTy_40 = FunctionType::get(
- /*Result=*/Type::getVoidTy(mod->getContext()),
- /*Params=*/FuncTy_40_args,
- /*isVarArg=*/true);
-
-
-// Function Declarations
-
-Function* func___art_type_list = mod->getFunction("__art_type_list");
-if (!func___art_type_list) {
-func___art_type_list = Function::Create(
- /*Type=*/FuncTy_0,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"__art_type_list", mod); // (external, no body)
-func___art_type_list->setCallingConv(CallingConv::C);
-}
-AttributeSet func___art_type_list_PAL;
-func___art_type_list->setAttributes(func___art_type_list_PAL);
-
-Function* func_art_portable_get_current_thread_from_code = mod->getFunction("art_portable_get_current_thread_from_code");
-if (!func_art_portable_get_current_thread_from_code) {
-func_art_portable_get_current_thread_from_code = Function::Create(
- /*Type=*/FuncTy_3,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get_current_thread_from_code", mod); // (external, no body)
-func_art_portable_get_current_thread_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get_current_thread_from_code_PAL;
-func_art_portable_get_current_thread_from_code->setAttributes(func_art_portable_get_current_thread_from_code_PAL);
-
-Function* func_art_portable_set_current_thread_from_code = mod->getFunction("art_portable_set_current_thread_from_code");
-if (!func_art_portable_set_current_thread_from_code) {
-func_art_portable_set_current_thread_from_code = Function::Create(
- /*Type=*/FuncTy_4,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set_current_thread_from_code", mod); // (external, no body)
-func_art_portable_set_current_thread_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set_current_thread_from_code_PAL;
-func_art_portable_set_current_thread_from_code->setAttributes(func_art_portable_set_current_thread_from_code_PAL);
-
-Function* func_art_portable_lock_object_from_code = mod->getFunction("art_portable_lock_object_from_code");
-if (!func_art_portable_lock_object_from_code) {
-func_art_portable_lock_object_from_code = Function::Create(
- /*Type=*/FuncTy_5,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_lock_object_from_code", mod); // (external, no body)
-func_art_portable_lock_object_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_lock_object_from_code_PAL;
-func_art_portable_lock_object_from_code->setAttributes(func_art_portable_lock_object_from_code_PAL);
-
-Function* func_art_portable_unlock_object_from_code = mod->getFunction("art_portable_unlock_object_from_code");
-if (!func_art_portable_unlock_object_from_code) {
-func_art_portable_unlock_object_from_code = Function::Create(
- /*Type=*/FuncTy_5,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_unlock_object_from_code", mod); // (external, no body)
-func_art_portable_unlock_object_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_unlock_object_from_code_PAL;
-func_art_portable_unlock_object_from_code->setAttributes(func_art_portable_unlock_object_from_code_PAL);
-
-Function* func_art_portable_test_suspend_from_code = mod->getFunction("art_portable_test_suspend_from_code");
-if (!func_art_portable_test_suspend_from_code) {
-func_art_portable_test_suspend_from_code = Function::Create(
- /*Type=*/FuncTy_6,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_test_suspend_from_code", mod); // (external, no body)
-func_art_portable_test_suspend_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_test_suspend_from_code_PAL;
-func_art_portable_test_suspend_from_code->setAttributes(func_art_portable_test_suspend_from_code_PAL);
-
-Function* func_art_portable_push_shadow_frame_from_code = mod->getFunction("art_portable_push_shadow_frame_from_code");
-if (!func_art_portable_push_shadow_frame_from_code) {
-func_art_portable_push_shadow_frame_from_code = Function::Create(
- /*Type=*/FuncTy_7,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_push_shadow_frame_from_code", mod); // (external, no body)
-func_art_portable_push_shadow_frame_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_push_shadow_frame_from_code_PAL;
-func_art_portable_push_shadow_frame_from_code->setAttributes(func_art_portable_push_shadow_frame_from_code_PAL);
-
-Function* func_art_portable_pop_shadow_frame_from_code = mod->getFunction("art_portable_pop_shadow_frame_from_code");
-if (!func_art_portable_pop_shadow_frame_from_code) {
-func_art_portable_pop_shadow_frame_from_code = Function::Create(
- /*Type=*/FuncTy_8,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_pop_shadow_frame_from_code", mod); // (external, no body)
-func_art_portable_pop_shadow_frame_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_pop_shadow_frame_from_code_PAL;
-func_art_portable_pop_shadow_frame_from_code->setAttributes(func_art_portable_pop_shadow_frame_from_code_PAL);
-
-Function* func_art_portable_get_and_clear_exception = mod->getFunction("art_portable_get_and_clear_exception");
-if (!func_art_portable_get_and_clear_exception) {
-func_art_portable_get_and_clear_exception = Function::Create(
- /*Type=*/FuncTy_4,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get_and_clear_exception", mod); // (external, no body)
-func_art_portable_get_and_clear_exception->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get_and_clear_exception_PAL;
-func_art_portable_get_and_clear_exception->setAttributes(func_art_portable_get_and_clear_exception_PAL);
-
-Function* func_art_portable_throw_div_zero_from_code = mod->getFunction("art_portable_throw_div_zero_from_code");
-if (!func_art_portable_throw_div_zero_from_code) {
-func_art_portable_throw_div_zero_from_code = Function::Create(
- /*Type=*/FuncTy_9,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_div_zero_from_code", mod); // (external, no body)
-func_art_portable_throw_div_zero_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_div_zero_from_code_PAL;
-func_art_portable_throw_div_zero_from_code->setAttributes(func_art_portable_throw_div_zero_from_code_PAL);
-
-Function* func_art_portable_throw_array_bounds_from_code = mod->getFunction("art_portable_throw_array_bounds_from_code");
-if (!func_art_portable_throw_array_bounds_from_code) {
-func_art_portable_throw_array_bounds_from_code = Function::Create(
- /*Type=*/FuncTy_10,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_array_bounds_from_code", mod); // (external, no body)
-func_art_portable_throw_array_bounds_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_array_bounds_from_code_PAL;
-func_art_portable_throw_array_bounds_from_code->setAttributes(func_art_portable_throw_array_bounds_from_code_PAL);
-
-Function* func_art_portable_throw_no_such_method_from_code = mod->getFunction("art_portable_throw_no_such_method_from_code");
-if (!func_art_portable_throw_no_such_method_from_code) {
-func_art_portable_throw_no_such_method_from_code = Function::Create(
- /*Type=*/FuncTy_11,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_no_such_method_from_code", mod); // (external, no body)
-func_art_portable_throw_no_such_method_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_no_such_method_from_code_PAL;
-func_art_portable_throw_no_such_method_from_code->setAttributes(func_art_portable_throw_no_such_method_from_code_PAL);
-
-Function* func_art_portable_throw_null_pointer_exception_from_code = mod->getFunction("art_portable_throw_null_pointer_exception_from_code");
-if (!func_art_portable_throw_null_pointer_exception_from_code) {
-func_art_portable_throw_null_pointer_exception_from_code = Function::Create(
- /*Type=*/FuncTy_11,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_null_pointer_exception_from_code", mod); // (external, no body)
-func_art_portable_throw_null_pointer_exception_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_null_pointer_exception_from_code_PAL;
-func_art_portable_throw_null_pointer_exception_from_code->setAttributes(func_art_portable_throw_null_pointer_exception_from_code_PAL);
-
-Function* func_art_portable_throw_stack_overflow_from_code = mod->getFunction("art_portable_throw_stack_overflow_from_code");
-if (!func_art_portable_throw_stack_overflow_from_code) {
-func_art_portable_throw_stack_overflow_from_code = Function::Create(
- /*Type=*/FuncTy_9,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_stack_overflow_from_code", mod); // (external, no body)
-func_art_portable_throw_stack_overflow_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_stack_overflow_from_code_PAL;
-func_art_portable_throw_stack_overflow_from_code->setAttributes(func_art_portable_throw_stack_overflow_from_code_PAL);
-
-Function* func_art_portable_throw_exception_from_code = mod->getFunction("art_portable_throw_exception_from_code");
-if (!func_art_portable_throw_exception_from_code) {
-func_art_portable_throw_exception_from_code = Function::Create(
- /*Type=*/FuncTy_6,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_throw_exception_from_code", mod); // (external, no body)
-func_art_portable_throw_exception_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_throw_exception_from_code_PAL;
-func_art_portable_throw_exception_from_code->setAttributes(func_art_portable_throw_exception_from_code_PAL);
-
-Function* func_art_portable_find_catch_block_from_code = mod->getFunction("art_portable_find_catch_block_from_code");
-if (!func_art_portable_find_catch_block_from_code) {
-func_art_portable_find_catch_block_from_code = Function::Create(
- /*Type=*/FuncTy_12,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_catch_block_from_code", mod); // (external, no body)
-func_art_portable_find_catch_block_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_catch_block_from_code_PAL;
-func_art_portable_find_catch_block_from_code->setAttributes(func_art_portable_find_catch_block_from_code_PAL);
-
-Function* func_art_portable_alloc_object_from_code = mod->getFunction("art_portable_alloc_object_from_code");
-if (!func_art_portable_alloc_object_from_code) {
-func_art_portable_alloc_object_from_code = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_alloc_object_from_code", mod); // (external, no body)
-func_art_portable_alloc_object_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_alloc_object_from_code_PAL;
-func_art_portable_alloc_object_from_code->setAttributes(func_art_portable_alloc_object_from_code_PAL);
-
-Function* func_art_portable_alloc_object_from_code_with_access_check = mod->getFunction("art_portable_alloc_object_from_code_with_access_check");
-if (!func_art_portable_alloc_object_from_code_with_access_check) {
-func_art_portable_alloc_object_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_alloc_object_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_alloc_object_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_alloc_object_from_code_with_access_check_PAL;
-func_art_portable_alloc_object_from_code_with_access_check->setAttributes(func_art_portable_alloc_object_from_code_with_access_check_PAL);
-
-Function* func_art_portable_alloc_array_from_code = mod->getFunction("art_portable_alloc_array_from_code");
-if (!func_art_portable_alloc_array_from_code) {
-func_art_portable_alloc_array_from_code = Function::Create(
- /*Type=*/FuncTy_14,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_alloc_array_from_code", mod); // (external, no body)
-func_art_portable_alloc_array_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_alloc_array_from_code_PAL;
-func_art_portable_alloc_array_from_code->setAttributes(func_art_portable_alloc_array_from_code_PAL);
-
-Function* func_art_portable_alloc_array_from_code_with_access_check = mod->getFunction("art_portable_alloc_array_from_code_with_access_check");
-if (!func_art_portable_alloc_array_from_code_with_access_check) {
-func_art_portable_alloc_array_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_14,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_alloc_array_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_alloc_array_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_alloc_array_from_code_with_access_check_PAL;
-func_art_portable_alloc_array_from_code_with_access_check->setAttributes(func_art_portable_alloc_array_from_code_with_access_check_PAL);
-
-Function* func_art_portable_check_and_alloc_array_from_code = mod->getFunction("art_portable_check_and_alloc_array_from_code");
-if (!func_art_portable_check_and_alloc_array_from_code) {
-func_art_portable_check_and_alloc_array_from_code = Function::Create(
- /*Type=*/FuncTy_14,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_check_and_alloc_array_from_code", mod); // (external, no body)
-func_art_portable_check_and_alloc_array_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_check_and_alloc_array_from_code_PAL;
-func_art_portable_check_and_alloc_array_from_code->setAttributes(func_art_portable_check_and_alloc_array_from_code_PAL);
-
-Function* func_art_portable_check_and_alloc_array_from_code_with_access_check = mod->getFunction("art_portable_check_and_alloc_array_from_code_with_access_check");
-if (!func_art_portable_check_and_alloc_array_from_code_with_access_check) {
-func_art_portable_check_and_alloc_array_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_14,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_check_and_alloc_array_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_check_and_alloc_array_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_check_and_alloc_array_from_code_with_access_check_PAL;
-func_art_portable_check_and_alloc_array_from_code_with_access_check->setAttributes(func_art_portable_check_and_alloc_array_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_instance_field_from_code = mod->getFunction("art_portable_find_instance_field_from_code");
-if (!func_art_portable_find_instance_field_from_code) {
-func_art_portable_find_instance_field_from_code = Function::Create(
- /*Type=*/FuncTy_15,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_instance_field_from_code", mod); // (external, no body)
-func_art_portable_find_instance_field_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_instance_field_from_code_PAL;
-func_art_portable_find_instance_field_from_code->setAttributes(func_art_portable_find_instance_field_from_code_PAL);
-
-Function* func_art_portable_find_static_field_from_code = mod->getFunction("art_portable_find_static_field_from_code");
-if (!func_art_portable_find_static_field_from_code) {
-func_art_portable_find_static_field_from_code = Function::Create(
- /*Type=*/FuncTy_15,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_static_field_from_code", mod); // (external, no body)
-func_art_portable_find_static_field_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_static_field_from_code_PAL;
-func_art_portable_find_static_field_from_code->setAttributes(func_art_portable_find_static_field_from_code_PAL);
-
-Function* func_art_portable_find_static_method_from_code_with_access_check = mod->getFunction("art_portable_find_static_method_from_code_with_access_check");
-if (!func_art_portable_find_static_method_from_code_with_access_check) {
-func_art_portable_find_static_method_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_static_method_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_find_static_method_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_static_method_from_code_with_access_check_PAL;
-func_art_portable_find_static_method_from_code_with_access_check->setAttributes(func_art_portable_find_static_method_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_direct_method_from_code_with_access_check = mod->getFunction("art_portable_find_direct_method_from_code_with_access_check");
-if (!func_art_portable_find_direct_method_from_code_with_access_check) {
-func_art_portable_find_direct_method_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_direct_method_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_find_direct_method_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_direct_method_from_code_with_access_check_PAL;
-func_art_portable_find_direct_method_from_code_with_access_check->setAttributes(func_art_portable_find_direct_method_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_virtual_method_from_code_with_access_check = mod->getFunction("art_portable_find_virtual_method_from_code_with_access_check");
-if (!func_art_portable_find_virtual_method_from_code_with_access_check) {
-func_art_portable_find_virtual_method_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_virtual_method_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_find_virtual_method_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_virtual_method_from_code_with_access_check_PAL;
-func_art_portable_find_virtual_method_from_code_with_access_check->setAttributes(func_art_portable_find_virtual_method_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_super_method_from_code_with_access_check = mod->getFunction("art_portable_find_super_method_from_code_with_access_check");
-if (!func_art_portable_find_super_method_from_code_with_access_check) {
-func_art_portable_find_super_method_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_super_method_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_find_super_method_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_super_method_from_code_with_access_check_PAL;
-func_art_portable_find_super_method_from_code_with_access_check->setAttributes(func_art_portable_find_super_method_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_interface_method_from_code_with_access_check = mod->getFunction("art_portable_find_interface_method_from_code_with_access_check");
-if (!func_art_portable_find_interface_method_from_code_with_access_check) {
-func_art_portable_find_interface_method_from_code_with_access_check = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_interface_method_from_code_with_access_check", mod); // (external, no body)
-func_art_portable_find_interface_method_from_code_with_access_check->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_interface_method_from_code_with_access_check_PAL;
-func_art_portable_find_interface_method_from_code_with_access_check->setAttributes(func_art_portable_find_interface_method_from_code_with_access_check_PAL);
-
-Function* func_art_portable_find_interface_method_from_code = mod->getFunction("art_portable_find_interface_method_from_code");
-if (!func_art_portable_find_interface_method_from_code) {
-func_art_portable_find_interface_method_from_code = Function::Create(
- /*Type=*/FuncTy_16,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_find_interface_method_from_code", mod); // (external, no body)
-func_art_portable_find_interface_method_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_find_interface_method_from_code_PAL;
-func_art_portable_find_interface_method_from_code->setAttributes(func_art_portable_find_interface_method_from_code_PAL);
-
-Function* func_art_portable_initialize_static_storage_from_code = mod->getFunction("art_portable_initialize_static_storage_from_code");
-if (!func_art_portable_initialize_static_storage_from_code) {
-func_art_portable_initialize_static_storage_from_code = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_initialize_static_storage_from_code", mod); // (external, no body)
-func_art_portable_initialize_static_storage_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_initialize_static_storage_from_code_PAL;
-func_art_portable_initialize_static_storage_from_code->setAttributes(func_art_portable_initialize_static_storage_from_code_PAL);
-
-Function* func_art_portable_initialize_type_from_code = mod->getFunction("art_portable_initialize_type_from_code");
-if (!func_art_portable_initialize_type_from_code) {
-func_art_portable_initialize_type_from_code = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_initialize_type_from_code", mod); // (external, no body)
-func_art_portable_initialize_type_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_initialize_type_from_code_PAL;
-func_art_portable_initialize_type_from_code->setAttributes(func_art_portable_initialize_type_from_code_PAL);
-
-Function* func_art_portable_initialize_type_and_verify_access_from_code = mod->getFunction("art_portable_initialize_type_and_verify_access_from_code");
-if (!func_art_portable_initialize_type_and_verify_access_from_code) {
-func_art_portable_initialize_type_and_verify_access_from_code = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_initialize_type_and_verify_access_from_code", mod); // (external, no body)
-func_art_portable_initialize_type_and_verify_access_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_initialize_type_and_verify_access_from_code_PAL;
-func_art_portable_initialize_type_and_verify_access_from_code->setAttributes(func_art_portable_initialize_type_and_verify_access_from_code_PAL);
-
-Function* func_art_portable_resolve_string_from_code = mod->getFunction("art_portable_resolve_string_from_code");
-if (!func_art_portable_resolve_string_from_code) {
-func_art_portable_resolve_string_from_code = Function::Create(
- /*Type=*/FuncTy_17,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_resolve_string_from_code", mod); // (external, no body)
-func_art_portable_resolve_string_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_resolve_string_from_code_PAL;
-func_art_portable_resolve_string_from_code->setAttributes(func_art_portable_resolve_string_from_code_PAL);
-
-Function* func_art_portable_set32_static_from_code = mod->getFunction("art_portable_set32_static_from_code");
-if (!func_art_portable_set32_static_from_code) {
-func_art_portable_set32_static_from_code = Function::Create(
- /*Type=*/FuncTy_18,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set32_static_from_code", mod); // (external, no body)
-func_art_portable_set32_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set32_static_from_code_PAL;
-func_art_portable_set32_static_from_code->setAttributes(func_art_portable_set32_static_from_code_PAL);
-
-Function* func_art_portable_set64_static_from_code = mod->getFunction("art_portable_set64_static_from_code");
-if (!func_art_portable_set64_static_from_code) {
-func_art_portable_set64_static_from_code = Function::Create(
- /*Type=*/FuncTy_19,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set64_static_from_code", mod); // (external, no body)
-func_art_portable_set64_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set64_static_from_code_PAL;
-func_art_portable_set64_static_from_code->setAttributes(func_art_portable_set64_static_from_code_PAL);
-
-Function* func_art_portable_set_obj_static_from_code = mod->getFunction("art_portable_set_obj_static_from_code");
-if (!func_art_portable_set_obj_static_from_code) {
-func_art_portable_set_obj_static_from_code = Function::Create(
- /*Type=*/FuncTy_20,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set_obj_static_from_code", mod); // (external, no body)
-func_art_portable_set_obj_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set_obj_static_from_code_PAL;
-func_art_portable_set_obj_static_from_code->setAttributes(func_art_portable_set_obj_static_from_code_PAL);
-
-Function* func_art_portable_get32_static_from_code = mod->getFunction("art_portable_get32_static_from_code");
-if (!func_art_portable_get32_static_from_code) {
-func_art_portable_get32_static_from_code = Function::Create(
- /*Type=*/FuncTy_21,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get32_static_from_code", mod); // (external, no body)
-func_art_portable_get32_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get32_static_from_code_PAL;
-func_art_portable_get32_static_from_code->setAttributes(func_art_portable_get32_static_from_code_PAL);
-
-Function* func_art_portable_get64_static_from_code = mod->getFunction("art_portable_get64_static_from_code");
-if (!func_art_portable_get64_static_from_code) {
-func_art_portable_get64_static_from_code = Function::Create(
- /*Type=*/FuncTy_22,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get64_static_from_code", mod); // (external, no body)
-func_art_portable_get64_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get64_static_from_code_PAL;
-func_art_portable_get64_static_from_code->setAttributes(func_art_portable_get64_static_from_code_PAL);
-
-Function* func_art_portable_get_obj_static_from_code = mod->getFunction("art_portable_get_obj_static_from_code");
-if (!func_art_portable_get_obj_static_from_code) {
-func_art_portable_get_obj_static_from_code = Function::Create(
- /*Type=*/FuncTy_23,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get_obj_static_from_code", mod); // (external, no body)
-func_art_portable_get_obj_static_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get_obj_static_from_code_PAL;
-func_art_portable_get_obj_static_from_code->setAttributes(func_art_portable_get_obj_static_from_code_PAL);
-
-Function* func_art_portable_set32_instance_from_code = mod->getFunction("art_portable_set32_instance_from_code");
-if (!func_art_portable_set32_instance_from_code) {
-func_art_portable_set32_instance_from_code = Function::Create(
- /*Type=*/FuncTy_24,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set32_instance_from_code", mod); // (external, no body)
-func_art_portable_set32_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set32_instance_from_code_PAL;
-func_art_portable_set32_instance_from_code->setAttributes(func_art_portable_set32_instance_from_code_PAL);
-
-Function* func_art_portable_set64_instance_from_code = mod->getFunction("art_portable_set64_instance_from_code");
-if (!func_art_portable_set64_instance_from_code) {
-func_art_portable_set64_instance_from_code = Function::Create(
- /*Type=*/FuncTy_25,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set64_instance_from_code", mod); // (external, no body)
-func_art_portable_set64_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set64_instance_from_code_PAL;
-func_art_portable_set64_instance_from_code->setAttributes(func_art_portable_set64_instance_from_code_PAL);
-
-Function* func_art_portable_set_obj_instance_from_code = mod->getFunction("art_portable_set_obj_instance_from_code");
-if (!func_art_portable_set_obj_instance_from_code) {
-func_art_portable_set_obj_instance_from_code = Function::Create(
- /*Type=*/FuncTy_26,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_set_obj_instance_from_code", mod); // (external, no body)
-func_art_portable_set_obj_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_set_obj_instance_from_code_PAL;
-func_art_portable_set_obj_instance_from_code->setAttributes(func_art_portable_set_obj_instance_from_code_PAL);
-
-Function* func_art_portable_get32_instance_from_code = mod->getFunction("art_portable_get32_instance_from_code");
-if (!func_art_portable_get32_instance_from_code) {
-func_art_portable_get32_instance_from_code = Function::Create(
- /*Type=*/FuncTy_20,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get32_instance_from_code", mod); // (external, no body)
-func_art_portable_get32_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get32_instance_from_code_PAL;
-func_art_portable_get32_instance_from_code->setAttributes(func_art_portable_get32_instance_from_code_PAL);
-
-Function* func_art_portable_get64_instance_from_code = mod->getFunction("art_portable_get64_instance_from_code");
-if (!func_art_portable_get64_instance_from_code) {
-func_art_portable_get64_instance_from_code = Function::Create(
- /*Type=*/FuncTy_27,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get64_instance_from_code", mod); // (external, no body)
-func_art_portable_get64_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get64_instance_from_code_PAL;
-func_art_portable_get64_instance_from_code->setAttributes(func_art_portable_get64_instance_from_code_PAL);
-
-Function* func_art_portable_get_obj_instance_from_code = mod->getFunction("art_portable_get_obj_instance_from_code");
-if (!func_art_portable_get_obj_instance_from_code) {
-func_art_portable_get_obj_instance_from_code = Function::Create(
- /*Type=*/FuncTy_13,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_get_obj_instance_from_code", mod); // (external, no body)
-func_art_portable_get_obj_instance_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_get_obj_instance_from_code_PAL;
-func_art_portable_get_obj_instance_from_code->setAttributes(func_art_portable_get_obj_instance_from_code_PAL);
-
-Function* func_art_portable_decode_jobject_in_thread = mod->getFunction("art_portable_decode_jobject_in_thread");
-if (!func_art_portable_decode_jobject_in_thread) {
-func_art_portable_decode_jobject_in_thread = Function::Create(
- /*Type=*/FuncTy_28,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_decode_jobject_in_thread", mod); // (external, no body)
-func_art_portable_decode_jobject_in_thread->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_decode_jobject_in_thread_PAL;
-func_art_portable_decode_jobject_in_thread->setAttributes(func_art_portable_decode_jobject_in_thread_PAL);
-
-Function* func_art_portable_fill_array_data_from_code = mod->getFunction("art_portable_fill_array_data_from_code");
-if (!func_art_portable_fill_array_data_from_code) {
-func_art_portable_fill_array_data_from_code = Function::Create(
- /*Type=*/FuncTy_29,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_fill_array_data_from_code", mod); // (external, no body)
-func_art_portable_fill_array_data_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_fill_array_data_from_code_PAL;
-func_art_portable_fill_array_data_from_code->setAttributes(func_art_portable_fill_array_data_from_code_PAL);
-
-Function* func_art_portable_is_assignable_from_code = mod->getFunction("art_portable_is_assignable_from_code");
-if (!func_art_portable_is_assignable_from_code) {
-func_art_portable_is_assignable_from_code = Function::Create(
- /*Type=*/FuncTy_30,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_is_assignable_from_code", mod); // (external, no body)
-func_art_portable_is_assignable_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_is_assignable_from_code_PAL;
-func_art_portable_is_assignable_from_code->setAttributes(func_art_portable_is_assignable_from_code_PAL);
-
-Function* func_art_portable_check_cast_from_code = mod->getFunction("art_portable_check_cast_from_code");
-if (!func_art_portable_check_cast_from_code) {
-func_art_portable_check_cast_from_code = Function::Create(
- /*Type=*/FuncTy_5,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_check_cast_from_code", mod); // (external, no body)
-func_art_portable_check_cast_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_check_cast_from_code_PAL;
-func_art_portable_check_cast_from_code->setAttributes(func_art_portable_check_cast_from_code_PAL);
-
-Function* func_art_portable_check_put_array_element_from_code = mod->getFunction("art_portable_check_put_array_element_from_code");
-if (!func_art_portable_check_put_array_element_from_code) {
-func_art_portable_check_put_array_element_from_code = Function::Create(
- /*Type=*/FuncTy_5,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_check_put_array_element_from_code", mod); // (external, no body)
-func_art_portable_check_put_array_element_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_check_put_array_element_from_code_PAL;
-func_art_portable_check_put_array_element_from_code->setAttributes(func_art_portable_check_put_array_element_from_code_PAL);
-
-Function* func_art_d2l = mod->getFunction("art_d2l");
-if (!func_art_d2l) {
-func_art_d2l = Function::Create(
- /*Type=*/FuncTy_31,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_d2l", mod); // (external, no body)
-func_art_d2l->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_d2l_PAL;
-func_art_d2l->setAttributes(func_art_d2l_PAL);
-
-Function* func_art_d2i = mod->getFunction("art_d2i");
-if (!func_art_d2i) {
-func_art_d2i = Function::Create(
- /*Type=*/FuncTy_32,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_d2i", mod); // (external, no body)
-func_art_d2i->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_d2i_PAL;
-func_art_d2i->setAttributes(func_art_d2i_PAL);
-
-Function* func_art_f2l = mod->getFunction("art_f2l");
-if (!func_art_f2l) {
-func_art_f2l = Function::Create(
- /*Type=*/FuncTy_33,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_f2l", mod); // (external, no body)
-func_art_f2l->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_f2l_PAL;
-func_art_f2l->setAttributes(func_art_f2l_PAL);
-
-Function* func_art_f2i = mod->getFunction("art_f2i");
-if (!func_art_f2i) {
-func_art_f2i = Function::Create(
- /*Type=*/FuncTy_34,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_f2i", mod); // (external, no body)
-func_art_f2i->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_f2i_PAL;
-func_art_f2i->setAttributes(func_art_f2i_PAL);
-
-Function* func_art_portable_jni_method_start = mod->getFunction("art_portable_jni_method_start");
-if (!func_art_portable_jni_method_start) {
-func_art_portable_jni_method_start = Function::Create(
- /*Type=*/FuncTy_35,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_start", mod); // (external, no body)
-func_art_portable_jni_method_start->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_start_PAL;
-func_art_portable_jni_method_start->setAttributes(func_art_portable_jni_method_start_PAL);
-
-Function* func_art_portable_jni_method_start_synchronized = mod->getFunction("art_portable_jni_method_start_synchronized");
-if (!func_art_portable_jni_method_start_synchronized) {
-func_art_portable_jni_method_start_synchronized = Function::Create(
- /*Type=*/FuncTy_30,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_start_synchronized", mod); // (external, no body)
-func_art_portable_jni_method_start_synchronized->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_start_synchronized_PAL;
-func_art_portable_jni_method_start_synchronized->setAttributes(func_art_portable_jni_method_start_synchronized_PAL);
-
-Function* func_art_portable_jni_method_end = mod->getFunction("art_portable_jni_method_end");
-if (!func_art_portable_jni_method_end) {
-func_art_portable_jni_method_end = Function::Create(
- /*Type=*/FuncTy_15,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_end", mod); // (external, no body)
-func_art_portable_jni_method_end->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_end_PAL;
-func_art_portable_jni_method_end->setAttributes(func_art_portable_jni_method_end_PAL);
-
-Function* func_art_portable_jni_method_end_synchronized = mod->getFunction("art_portable_jni_method_end_synchronized");
-if (!func_art_portable_jni_method_end_synchronized) {
-func_art_portable_jni_method_end_synchronized = Function::Create(
- /*Type=*/FuncTy_36,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_end_synchronized", mod); // (external, no body)
-func_art_portable_jni_method_end_synchronized->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_end_synchronized_PAL;
-func_art_portable_jni_method_end_synchronized->setAttributes(func_art_portable_jni_method_end_synchronized_PAL);
-
-Function* func_art_portable_jni_method_end_with_reference = mod->getFunction("art_portable_jni_method_end_with_reference");
-if (!func_art_portable_jni_method_end_with_reference) {
-func_art_portable_jni_method_end_with_reference = Function::Create(
- /*Type=*/FuncTy_37,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_end_with_reference", mod); // (external, no body)
-func_art_portable_jni_method_end_with_reference->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_end_with_reference_PAL;
-func_art_portable_jni_method_end_with_reference->setAttributes(func_art_portable_jni_method_end_with_reference_PAL);
-
-Function* func_art_portable_jni_method_end_with_reference_synchronized = mod->getFunction("art_portable_jni_method_end_with_reference_synchronized");
-if (!func_art_portable_jni_method_end_with_reference_synchronized) {
-func_art_portable_jni_method_end_with_reference_synchronized = Function::Create(
- /*Type=*/FuncTy_38,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_jni_method_end_with_reference_synchronized", mod); // (external, no body)
-func_art_portable_jni_method_end_with_reference_synchronized->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_jni_method_end_with_reference_synchronized_PAL;
-func_art_portable_jni_method_end_with_reference_synchronized->setAttributes(func_art_portable_jni_method_end_with_reference_synchronized_PAL);
-
-Function* func_art_portable_is_exception_pending_from_code = mod->getFunction("art_portable_is_exception_pending_from_code");
-if (!func_art_portable_is_exception_pending_from_code) {
-func_art_portable_is_exception_pending_from_code = Function::Create(
- /*Type=*/FuncTy_39,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_is_exception_pending_from_code", mod); // (external, no body)
-func_art_portable_is_exception_pending_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_is_exception_pending_from_code_PAL;
-func_art_portable_is_exception_pending_from_code->setAttributes(func_art_portable_is_exception_pending_from_code_PAL);
-
-Function* func_art_portable_mark_gc_card_from_code = mod->getFunction("art_portable_mark_gc_card_from_code");
-if (!func_art_portable_mark_gc_card_from_code) {
-func_art_portable_mark_gc_card_from_code = Function::Create(
- /*Type=*/FuncTy_5,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_mark_gc_card_from_code", mod); // (external, no body)
-func_art_portable_mark_gc_card_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_mark_gc_card_from_code_PAL;
-func_art_portable_mark_gc_card_from_code->setAttributes(func_art_portable_mark_gc_card_from_code_PAL);
-
-Function* func_art_portable_proxy_invoke_handler_from_code = mod->getFunction("art_portable_proxy_invoke_handler_from_code");
-if (!func_art_portable_proxy_invoke_handler_from_code) {
-func_art_portable_proxy_invoke_handler_from_code = Function::Create(
- /*Type=*/FuncTy_40,
- /*Linkage=*/GlobalValue::ExternalLinkage,
- /*Name=*/"art_portable_proxy_invoke_handler_from_code", mod); // (external, no body)
-func_art_portable_proxy_invoke_handler_from_code->setCallingConv(CallingConv::C);
-}
-AttributeSet func_art_portable_proxy_invoke_handler_from_code_PAL;
-func_art_portable_proxy_invoke_handler_from_code->setAttributes(func_art_portable_proxy_invoke_handler_from_code_PAL);
-
-// Global Variable Declarations
-
-
-// Constant Definitions
-
-// Global Variable Definitions
-
-// Function Definitions
-
-return mod;
-
-}
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/intrinsic_func_list.def b/compiler/llvm/intrinsic_func_list.def
deleted file mode 100644
index 887a62666f..0000000000
--- a/compiler/llvm/intrinsic_func_list.def
+++ /dev/null
@@ -1,1796 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE,
-// ARG1_TYPE, ARG2_TYPE, ARG3_TYPE, ARG4_TYPE, ARG5_TYPE)
-#ifndef DEF_INTRINSICS_FUNC
-# error "missing DEF_INTRINSICS_FUNC definition!"
-#endif
-
-#define _EVAL_DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE, ...) \
- DEF_INTRINSICS_FUNC(ID, NAME, ATTR, RET_TYPE, __VA_ARGS__)
-
-#define _EXPAND_ARG0() kNone, kNone, kNone, kNone, kNone
-#define _EXPAND_ARG1(ARG1) ARG1, kNone, kNone, kNone, kNone
-#define _EXPAND_ARG2(ARG1, ARG2) ARG1, ARG2, kNone, kNone, kNone
-#define _EXPAND_ARG3(ARG1, ARG2, ARG3) ARG1, ARG2, ARG3, kNone, kNone
-#define _EXPAND_ARG4(ARG1, ARG2, ARG3, ARG4) ARG1, ARG2, ARG3, ARG4, kNone
-#define _EXPAND_ARG5(ARG1, ARG2, ARG3, ARG4, ARG5) \
- ARG1, ARG2, ARG3, ARG4, ARG5
-
-#define _JTYPE(TYPE, SPACE) _JTYPE_OF_ ## TYPE ## _UNDER_ ## SPACE
-
-// Note: These should be consistent with the type return from
-// IRBuilder::GetJType([type], kArray).
-#define _JTYPE_OF_kInt1Ty_UNDER_kArray kInt8Ty
-#define _JTYPE_OF_kInt8Ty_UNDER_kArray kInt8Ty
-#define _JTYPE_OF_kInt16Ty_UNDER_kArray kInt16Ty
-#define _JTYPE_OF_kInt32Ty_UNDER_kArray kInt32Ty
-#define _JTYPE_OF_kInt64Ty_UNDER_kArray kInt64Ty
-#define _JTYPE_OF_kJavaObjectTy_UNDER_kArray kJavaObjectTy
-
-// Note: These should be consistent with the type return from
-// IRBuilder::GetJType([type], kField).
-#define _JTYPE_OF_kInt1Ty_UNDER_kField kInt32Ty
-#define _JTYPE_OF_kInt8Ty_UNDER_kField kInt32Ty
-#define _JTYPE_OF_kInt16Ty_UNDER_kField kInt32Ty
-#define _JTYPE_OF_kInt32Ty_UNDER_kField kInt32Ty
-#define _JTYPE_OF_kInt64Ty_UNDER_kField kInt64Ty
-#define _JTYPE_OF_kJavaObjectTy_UNDER_kField kJavaObjectTy
-
-//----------------------------------------------------------------------------
-// Thread
-//----------------------------------------------------------------------------
-
-// Thread* art_portable_get_current_thread()
-_EVAL_DEF_INTRINSICS_FUNC(GetCurrentThread,
- art_portable_get_current_thread,
- kAttrReadNone | kAttrNoThrow,
- kJavaThreadTy,
- _EXPAND_ARG0())
-
-// void art_portable_test_suspend(Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(TestSuspend,
- art_portable_test_suspend,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG1(kJavaThreadTy))
-
-// void art_portable_check_suspend() /* Expands to GetCurrentThread/TestSuspend */
-_EVAL_DEF_INTRINSICS_FUNC(CheckSuspend,
- art_portable_check_suspend,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG0())
-
-// void art_portable_mark_gc_card(Object* new_value, Object* object)
-_EVAL_DEF_INTRINSICS_FUNC(MarkGCCard,
- art_portable_mark_gc_card,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
-
-//----------------------------------------------------------------------------
-// Exception
-//----------------------------------------------------------------------------
-
-// Should not expand - introduces the catch targets for a potentially
-// throwing instruction. The result is a switch key and this
-// instruction will be followed by a switch statement. The catch
-// targets will be enumerated as cases of the switch, with the fallthrough
-// designating the block containing the potentially throwing instruction.
-// bool art_portable_catch_targets(int dex_pc)
-_EVAL_DEF_INTRINSICS_FUNC(CatchTargets,
- art_portable_catch_targets,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// void art_portable_throw_exception(JavaObject* exception)
-_EVAL_DEF_INTRINSICS_FUNC(ThrowException,
- art_portable_throw_exception,
- kAttrDoThrow,
- kVoidTy,
- _EXPAND_ARG1(kJavaObjectTy))
-
-// void art_portable_hl_throw_exception(JavaObject* exception)
-_EVAL_DEF_INTRINSICS_FUNC(HLThrowException,
- art_portable_hl_throw_exception,
- kAttrDoThrow,
- kVoidTy,
- _EXPAND_ARG1(kJavaObjectTy))
-
-// JavaObject* art_portable_get_current_exception()
-_EVAL_DEF_INTRINSICS_FUNC(GetException,
- art_portable_get_current_exception,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG0())
-
-// bool art_portable_is_exception_pending()
-_EVAL_DEF_INTRINSICS_FUNC(IsExceptionPending,
- art_portable_is_exception_pending,
- kAttrReadOnly | kAttrNoThrow,
- kInt1Ty,
- _EXPAND_ARG0())
-
-// int art_portable_find_catch_block(Method* method, int try_item_offset)
-_EVAL_DEF_INTRINSICS_FUNC(FindCatchBlock,
- art_portable_find_catch_block,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kJavaMethodTy, kInt32ConstantTy))
-
-// void art_portable_throw_div_zero()
-_EVAL_DEF_INTRINSICS_FUNC(ThrowDivZeroException,
- art_portable_throw_div_zero,
- kAttrDoThrow,
- kVoidTy,
- _EXPAND_ARG0())
-
-// void art_portable_throw_null_pointer_exception(uint32_t dex_pc)
-_EVAL_DEF_INTRINSICS_FUNC(ThrowNullPointerException,
- art_portable_throw_null_pointer_exception,
- kAttrDoThrow,
- kVoidTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// void art_portable_throw_array_bounds(int index, int array_len)
-_EVAL_DEF_INTRINSICS_FUNC(ThrowIndexOutOfBounds,
- art_portable_throw_array_bounds,
- kAttrDoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-//----------------------------------------------------------------------------
-// ConstString
-//----------------------------------------------------------------------------
-
-// JavaObject* art_portable_const_string(uint32_t string_idx)
-_EVAL_DEF_INTRINSICS_FUNC(ConstString,
- art_portable_const_string,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// JavaObject* art_portable_load_string_from_dex_cache(Method* method, uint32_t string_idx)
-_EVAL_DEF_INTRINSICS_FUNC(LoadStringFromDexCache,
- art_portable_load_string_from_dex_cache,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// JavaObject* art_portable_resolve_string(Method* method, uint32_t string_idx)
-_EVAL_DEF_INTRINSICS_FUNC(ResolveString,
- art_portable_resolve_string,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG2(kJavaMethodTy, kInt32ConstantTy))
-
-//----------------------------------------------------------------------------
-// ConstClass
-//----------------------------------------------------------------------------
-
-// JavaObject* art_portable_const_class(uint32_t type_idx)
-_EVAL_DEF_INTRINSICS_FUNC(ConstClass,
- art_portable_const_class,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// JavaObject* art_portable_initialize_type_and_verify_access(uint32_t type_idx,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(InitializeTypeAndVerifyAccess,
- art_portable_initialize_type_and_verify_access,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
-
-// JavaObject* art_portable_load_type_from_dex_cache(uint32_t type_idx)
-_EVAL_DEF_INTRINSICS_FUNC(LoadTypeFromDexCache,
- art_portable_load_type_from_dex_cache,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// JavaObject* art_portable_initialize_type(uint32_t type_idx,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(InitializeType,
- art_portable_initialize_type,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
-
-//----------------------------------------------------------------------------
-// Lock
-//----------------------------------------------------------------------------
-
-// void art_portable_lock_object(JavaObject* obj, Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(LockObject,
- art_portable_lock_object,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kJavaObjectTy, kJavaThreadTy))
-
-// void art_portable_unlock_object(JavaObject* obj, Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(UnlockObject,
- art_portable_unlock_object,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG2(kJavaObjectTy, kJavaThreadTy))
-
-//----------------------------------------------------------------------------
-// Cast
-//----------------------------------------------------------------------------
-
-// void art_portable_check_cast(JavaObject* dest_type, JavaObject* src_type)
-_EVAL_DEF_INTRINSICS_FUNC(CheckCast,
- art_portable_check_cast,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
-
-// void art_portable_hl_check_cast(uint32_t type_idx, JavaObject* obj)
-_EVAL_DEF_INTRINSICS_FUNC(HLCheckCast,
- art_portable_hl_check_cast,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
-
-// int art_portable_is_assignable(JavaObject* dest_type, JavaObject* src_type)
-_EVAL_DEF_INTRINSICS_FUNC(IsAssignable,
- art_portable_is_assignable,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
-
-//----------------------------------------------------------------------------
-// Allocation
-//----------------------------------------------------------------------------
-
-// JavaObject* art_portable_alloc_object(uint32_t type_idx,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(AllocObject,
- art_portable_alloc_object,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
-
-// JavaObject* art_portable_alloc_object_with_access_check(uint32_t type_idx,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(AllocObjectWithAccessCheck,
- art_portable_alloc_object_with_access_check,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
-
-//----------------------------------------------------------------------------
-// Instance
-//----------------------------------------------------------------------------
-
-// JavaObject* art_portable_new_instance(uint32_t type_idx)
-_EVAL_DEF_INTRINSICS_FUNC(NewInstance,
- art_portable_new_instance,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32Ty))
-
-// bool art_portable_instance_of(uint32_t type_idx, JavaObject* ref)
-_EVAL_DEF_INTRINSICS_FUNC(InstanceOf,
- art_portable_instance_of,
- kAttrNone,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
-
-//----------------------------------------------------------------------------
-// Array
-//----------------------------------------------------------------------------
-
-// JavaObject* art_portable_new_array(uint32_t type_idx, uint32_t array_size)
-_EVAL_DEF_INTRINSICS_FUNC(NewArray,
- art_portable_new_array,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG2(kInt32ConstantTy, kInt32Ty))
-
-// uint32_t art_portable_opt_array_length(int32_t opt_flags, JavaObject* array)
-_EVAL_DEF_INTRINSICS_FUNC(OptArrayLength,
- art_portable_opt_array_length,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
-
-// uint32_t art_portable_array_length(JavaObject* array)
-_EVAL_DEF_INTRINSICS_FUNC(ArrayLength,
- art_portable_array_length,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kJavaObjectTy))
-
-// JavaObject* art_portable_alloc_array(uint32_t type_idx,
-// Method* referrer,
-// uint32_t length,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(AllocArray,
- art_portable_alloc_array,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32Ty, kJavaThreadTy))
-
-// JavaObject* art_portable_alloc_array_with_access_check(uint32_t type_idx,
-// Method* referrer,
-// uint32_t length,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(AllocArrayWithAccessCheck,
- art_portable_alloc_array_with_access_check,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32Ty, kJavaThreadTy))
-
-// JavaObject* art_portable_check_and_alloc_array(uint32_t type_idx,
-// Method* referrer,
-// uint32_t length,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(CheckAndAllocArray,
- art_portable_check_and_alloc_array,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32ConstantTy, kJavaThreadTy))
-
-// JavaObject* art_portable_check_and_alloc_array_with_access_check(uint32_t type_idx,
-// Method* referrer,
-// uint32_t length,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(CheckAndAllocArrayWithAccessCheck,
- art_portable_check_and_alloc_array_with_access_check,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kInt32ConstantTy, kJavaThreadTy))
-
-// art_portable_aget_* and art_portable_aput_* never generate exception since the
-// necessary checking on arguments (e.g., array and index) has already done
-// before invocation of these intrinsics.
-//
-// [type] void art_portable_aget_[type](JavaObject* array, uint32_t index)
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGet,
- art_portable_aget,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt32Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetWide,
- art_portable_aget_wide,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt64Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetObject,
- art_portable_aget_object,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kJavaObjectTy, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetBoolean,
- art_portable_aget_boolean,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt1Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetByte,
- art_portable_aget_byte,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt8Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetChar,
- art_portable_aget_char,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayGetShort,
- art_portable_aget_short,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kArray),
- _EXPAND_ARG2(kJavaObjectTy, kInt32Ty))
-
-// void art_portable_aput_[type]([type] value, JavaObject* array, uint32_t index)
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPut,
- art_portable_aput,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt32Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutWide,
- art_portable_aput_wide,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt64Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutObject,
- art_portable_aput_object,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kJavaObjectTy, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutBoolean,
- art_portable_aput_boolean,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt1Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutByte,
- art_portable_aput_byte,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt8Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutChar,
- art_portable_aput_char,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt16Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(ArrayPutShort,
- art_portable_aput_short,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(_JTYPE(kInt16Ty, kArray), kJavaObjectTy, kInt32Ty))
-
-// void art_portable_check_put_array_element(JavaObject* value, JavaObject* array)
-_EVAL_DEF_INTRINSICS_FUNC(CheckPutArrayElement,
- art_portable_check_put_array_element,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG2(kJavaObjectTy, kJavaObjectTy))
-
-// void art_portable_filled_new_array(Array* array,
-// uint32_t elem_jty, ...)
-_EVAL_DEF_INTRINSICS_FUNC(FilledNewArray,
- art_portable_filled_new_array,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kVarArgTy))
-
-// void art_portable_fill_array_data(Method* referrer,
-// uint32_t dex_pc,
-// Array* array,
-// uint32_t payload_offset)
-_EVAL_DEF_INTRINSICS_FUNC(FillArrayData,
- art_portable_fill_array_data,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kJavaMethodTy, kInt32ConstantTy, kJavaObjectTy, kInt32ConstantTy))
-
-// void art_portable_hl_fill_array_data(int32_t offset, JavaObject* array)
-_EVAL_DEF_INTRINSICS_FUNC(HLFillArrayData,
- art_portable_hl_fill_array_data,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
-
-//----------------------------------------------------------------------------
-// Instance Field
-//----------------------------------------------------------------------------
-
-// [type] art_portable_iget_[type](uint32_t field_idx,
-// Method* referrer,
-// JavaObject* obj)
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGet,
- art_portable_iget,
- kAttrNone,
- _JTYPE(kInt32Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetWide,
- art_portable_iget_wide,
- kAttrNone,
- _JTYPE(kInt64Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetObject,
- art_portable_iget_object,
- kAttrNone,
- _JTYPE(kJavaObjectTy, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetBoolean,
- art_portable_iget_boolean,
- kAttrNone,
- _JTYPE(kInt1Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetByte,
- art_portable_iget_byte,
- kAttrNone,
- _JTYPE(kInt8Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetChar,
- art_portable_iget_char,
- kAttrNone,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetShort,
- art_portable_iget_short,
- kAttrNone,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy))
-
-// [type] art_portable_iget_[type].fast(int field_offset,
-// bool is_volatile,
-// JavaObject* obj)
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetFast,
- art_portable_iget.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt32Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetWideFast,
- art_portable_iget_wide.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt64Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetObjectFast,
- art_portable_iget_object.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kJavaObjectTy, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetBooleanFast,
- art_portable_iget_boolean.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt1Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetByteFast,
- art_portable_iget_byte.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt8Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetCharFast,
- art_portable_iget_char.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldGetShortFast,
- art_portable_iget_short.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy))
-
-// void art_portable_iput_[type](uint32_t field_idx,
-// Method* referrer,
-// JavaObject* obj,
-// [type] new_value)
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPut,
- art_portable_iput,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt32Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutWide,
- art_portable_iput_wide,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt64Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutObject,
- art_portable_iput_object,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kJavaObjectTy, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutBoolean,
- art_portable_iput_boolean,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt1Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutByte,
- art_portable_iput_byte,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt8Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutChar,
- art_portable_iput_char,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutShort,
- art_portable_iput_short,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaMethodTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
-
-// void art_portable_iput_[type].fast(int field_offset,
-// bool is_volatile,
-// JavaObject* obj,
-// [type] new_value)
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutFast,
- art_portable_iput.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt32Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutWideFast,
- art_portable_iput_wide.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt64Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutObjectFast,
- art_portable_iput_object.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kJavaObjectTy, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutBooleanFast,
- art_portable_iput_boolean.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt1Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutByteFast,
- art_portable_iput_byte.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt8Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutCharFast,
- art_portable_iput_char.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(InstanceFieldPutShortFast,
- art_portable_iput_short.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32ConstantTy, kInt1ConstantTy, kJavaObjectTy, _JTYPE(kInt16Ty, kField)))
-
-//----------------------------------------------------------------------------
-// Static Field
-//----------------------------------------------------------------------------
-
-// [type] art_portable_sget_[type](uint32_t field_idx, Method* referrer)
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGet,
- art_portable_sget,
- kAttrNone,
- _JTYPE(kInt32Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetWide,
- art_portable_sget_wide,
- kAttrNone,
- _JTYPE(kInt64Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetObject,
- art_portable_sget_object,
- kAttrNone,
- _JTYPE(kJavaObjectTy, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetBoolean,
- art_portable_sget_boolean,
- kAttrNone,
- _JTYPE(kInt1Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetByte,
- art_portable_sget_byte,
- kAttrNone,
- _JTYPE(kInt8Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetChar,
- art_portable_sget_char,
- kAttrNone,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetShort,
- art_portable_sget_short,
- kAttrNone,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG2(kInt32ConstantTy, kJavaMethodTy))
-
-// [type] art_portable_sget_[type].fast(JavaObject* ssb,
-// int field_offset,
-// bool is_volatile)
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetFast,
- art_portable_sget.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt32Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetWideFast,
- art_portable_sget_wide.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt64Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetObjectFast,
- art_portable_sget_object.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kJavaObjectTy, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetBooleanFast,
- art_portable_sget_boolean.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt1Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetByteFast,
- art_portable_sget_byte.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt8Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetCharFast,
- art_portable_sget_char.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldGetShortFast,
- art_portable_sget_short.fast,
- kAttrReadOnly | kAttrNoThrow,
- _JTYPE(kInt16Ty, kField),
- _EXPAND_ARG3(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy))
-
-// void art_portable_sput_[type](uint32_t field_idx,
-// Method* referrer,
-// [type] new_value)
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPut,
- art_portable_sput,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt32Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutWide,
- art_portable_sput_wide,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt64Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutObject,
- art_portable_sput_object,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kJavaObjectTy, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutBoolean,
- art_portable_sput_boolean,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt1Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutByte,
- art_portable_sput_byte,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt8Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutChar,
- art_portable_sput_char,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt16Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutShort,
- art_portable_sput_short,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, _JTYPE(kInt16Ty, kField)))
-
-// void art_portable_sput_[type].fast(JavaObject* ssb,
-// int field_offset,
-// bool is_volatile,
-// [type] new_value)
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutFast,
- art_portable_sput.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt32Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutWideFast,
- art_portable_sput_wide.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt64Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutObjectFast,
- art_portable_sput_object.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kJavaObjectTy, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutBooleanFast,
- art_portable_sput_boolean.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt1Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutByteFast,
- art_portable_sput_byte.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt8Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutCharFast,
- art_portable_sput_char.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt16Ty, kField)))
-
-_EVAL_DEF_INTRINSICS_FUNC(StaticFieldPutShortFast,
- art_portable_sput_short.fast,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kJavaObjectTy, kInt32ConstantTy, kInt1ConstantTy, _JTYPE(kInt16Ty, kField)))
-
-// JavaObject* art_portable_load_declaring_class_ssb(Method* method)
-// Load the static storage base of the class that given method resides
-_EVAL_DEF_INTRINSICS_FUNC(LoadDeclaringClassSSB,
- art_portable_load_declaring_class_ssb,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kJavaMethodTy))
-
-// JavaObject* art_portable_init_and_load_class_ssb(uint32_t type_idx,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(InitializeAndLoadClassSSB,
- art_portable_init_and_load_class_ssb,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32ConstantTy, kJavaMethodTy, kJavaThreadTy))
-
-//----------------------------------------------------------------------------
-// High-level Array get/put
-//
-// Similar to art_portable_aget/aput_xxx, but checks not yet performed.
-// OptFlags contain info describing whether frontend has determined that
-// null check and/or array bounds check may be skipped.
-//
-// [type] void art_portable_hl_aget_[type](int optFlags, JavaObject* array, uint32_t index)
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGet,
- art_portable_hl_aget,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetFloat,
- art_portable_hl_aget_float,
- kAttrReadOnly | kAttrNoThrow,
- kFloatTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetWide,
- art_portable_hl_aget_wide,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetDouble,
- art_portable_hl_aget_double,
- kAttrReadOnly | kAttrNoThrow,
- kDoubleTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetObject,
- art_portable_hl_aget_object,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetBoolean,
- art_portable_hl_aget_boolean,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetByte,
- art_portable_hl_aget_byte,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetChar,
- art_portable_hl_aget_char,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayGetShort,
- art_portable_hl_aget_short,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-// void art_portable_aput_[type](int optFlags, [type] value, JavaObject* array, uint32_t index)
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPut,
- art_portable_hl_aput,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutFloat,
- art_portable_hl_aput_float,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kFloatTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutWide,
- art_portable_hl_aput_wide,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt64Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutDouble,
- art_portable_hl_aput_double,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kDoubleTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutObject,
- art_portable_hl_aput_object,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kJavaObjectTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutBoolean,
- art_portable_hl_aput_boolean,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutByte,
- art_portable_hl_aput_byte,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutChar,
- art_portable_hl_aput_char,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLArrayPutShort,
- art_portable_hl_aput_short,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-//----------------------------------------------------------------------------
-// High-level Instance get/put
-//
-// Similar to art_portable_iget/iput_xxx, but checks not yet performed.
-// OptFlags contain info describing whether frontend has determined that
-// null check may be skipped.
-//
-// [type] void art_portable_hl_iget_[type](int optFlags, JavaObject* obj, uint32_t field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLIGet,
- art_portable_hl_iget,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetFloat,
- art_portable_hl_iget_float,
- kAttrReadOnly | kAttrNoThrow,
- kFloatTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetWide,
- art_portable_hl_iget_wide,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetDouble,
- art_portable_hl_iget_double,
- kAttrReadOnly | kAttrNoThrow,
- kDoubleTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetObject,
- art_portable_hl_iget_object,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetBoolean,
- art_portable_hl_iget_boolean,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetByte,
- art_portable_hl_iget_byte,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetChar,
- art_portable_hl_iget_char,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIGetShort,
- art_portable_hl_iget_short,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG3(kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-// void art_portable_iput_[type](int optFlags, [type] value, JavaObject* obj, uint32_t field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLIPut,
- art_portable_hl_iput,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutFloat,
- art_portable_hl_iput_float,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kFloatTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutWide,
- art_portable_hl_iput_wide,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt64Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutDouble,
- art_portable_hl_iput_double,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kDoubleTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutObject,
- art_portable_hl_iput_object,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kJavaObjectTy, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutBoolean,
- art_portable_hl_iput_boolean,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutByte,
- art_portable_hl_iput_byte,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutChar,
- art_portable_hl_iput_char,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(HLIPutShort,
- art_portable_hl_iput_short,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG4(kInt32Ty, kInt32Ty, kJavaObjectTy, kInt32Ty))
-
-//----------------------------------------------------------------------------
-// High-level Invokes (fast-path determination not yet performed)
-//
-// NOTE: We expect these intrinsics to be temporary. Once calling conventions are
-// fully merged, the unified front end will lower down to the
-// InvokeRetxxx() intrinsics in the next section and these will be
-// removed.
-//
-// arg0: InvokeType [ignored if FilledNewArray]
-// arg1: method_idx [ignored if FilledNewArray]
-// arg2: optimization_flags (primary to note whether null checking is needed)
-// [arg3..argN]: actual arguments
-//----------------------------------------------------------------------------
-// INVOKE method returns void
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeVoid,
- art_portable_hl_invoke.void,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG1(kVarArgTy))
-
-// INVOKE method returns object
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeObj,
- art_portable_hl_invoke.obj,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG1(kVarArgTy))
-
-// INVOKE method returns int
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeInt,
- art_portable_hl_invoke.i32,
- kAttrNone,
- kInt32Ty,
- _EXPAND_ARG1(kVarArgTy))
-
-// INVOKE method returns float
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeFloat,
- art_portable_hl_invoke.f32,
- kAttrNone,
- kFloatTy,
- _EXPAND_ARG1(kVarArgTy))
-
-// INVOKE method returns long
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeLong,
- art_portable_hl_invoke.i64,
- kAttrNone,
- kInt64Ty,
- _EXPAND_ARG1(kVarArgTy))
-
-// INVOKE method returns double
-_EVAL_DEF_INTRINSICS_FUNC(HLInvokeDouble,
- art_portable_hl_invoke.f64,
- kAttrNone,
- kDoubleTy,
- _EXPAND_ARG1(kVarArgTy))
-
-// FILLED_NEW_ARRAY returns object
-_EVAL_DEF_INTRINSICS_FUNC(HLFilledNewArray,
- art_portable_hl_filled_new_array,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG1(kVarArgTy))
-
-//----------------------------------------------------------------------------
-// Invoke
-//----------------------------------------------------------------------------
-
-// Method* art_portable_find_static_method_with_access_check(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(FindStaticMethodWithAccessCheck,
- art_portable_find_static_method_with_access_check,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// Method* art_portable_find_direct_method_with_access_check(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(FindDirectMethodWithAccessCheck,
- art_portable_find_direct_method_with_access_check,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// Method* art_portable_find_virtual_method_with_access_check(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(FindVirtualMethodWithAccessCheck,
- art_portable_find_virtual_method_with_access_check,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// Method* art_portable_find_super_method_with_access_check(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(FindSuperMethodWithAccessCheck,
- art_portable_find_super_method_with_access_check,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// Method* art_portable_find_interface_method_with_access_check(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(FindInterfaceMethodWithAccessCheck,
- art_portable_find_interface_method_with_access_check,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// Method* art_portable_get_sd_callee_method_obj_addr(uint32_t method_idx)
-_EVAL_DEF_INTRINSICS_FUNC(GetSDCalleeMethodObjAddrFast,
- art_portable_get_sd_callee_method_obj_addr_fast,
- kAttrReadOnly | kAttrNoThrow,
- kJavaMethodTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// Method* art_portable_get_virtual_callee_method_obj_addr(uint32_t vtable_idx,
-// JavaObject* this)
-_EVAL_DEF_INTRINSICS_FUNC(GetVirtualCalleeMethodObjAddrFast,
- art_portable_get_virtual_callee_method_obj_addr_fast,
- kAttrReadOnly | kAttrNoThrow,
- kJavaMethodTy,
- _EXPAND_ARG2(kInt32ConstantTy, kJavaObjectTy))
-
-// Method* art_portable_get_interface_callee_method_obj_addr(uint32_t method_idx,
-// JavaObject* this,
-// Method* referrer,
-// Thread* thread)
-_EVAL_DEF_INTRINSICS_FUNC(GetInterfaceCalleeMethodObjAddrFast,
- art_portable_get_interface_callee_method_obj_addr_fast,
- kAttrNone,
- kJavaMethodTy,
- _EXPAND_ARG4(kInt32ConstantTy, kJavaObjectTy, kJavaMethodTy, kJavaThreadTy))
-
-// [type] art_portable_invoke.[type](Method* callee, ...)
-// INVOKE method returns void
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetVoid,
- art_portable_invoke.void,
- kAttrNone,
- kVoidTy,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type boolean
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetBoolean,
- art_portable_invoke.bool,
- kAttrNone,
- kInt1Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type byte
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetByte,
- art_portable_invoke.byte,
- kAttrNone,
- kInt8Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type char
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetChar,
- art_portable_invoke.char,
- kAttrNone,
- kInt16Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type short
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetShort,
- art_portable_invoke.short,
- kAttrNone,
- kInt16Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type int
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetInt,
- art_portable_invoke.int,
- kAttrNone,
- kInt32Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type long
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetLong,
- art_portable_invoke.long,
- kAttrNone,
- kInt64Ty,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type float
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetFloat,
- art_portable_invoke.float,
- kAttrNone,
- kFloatTy,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type double
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetDouble,
- art_portable_invoke.double,
- kAttrNone,
- kDoubleTy,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-// INVOKE method returns the value of type "object"
-_EVAL_DEF_INTRINSICS_FUNC(InvokeRetObject,
- art_portable_invoke.object,
- kAttrNone,
- kJavaObjectTy,
- _EXPAND_ARG2(kJavaMethodTy, kVarArgTy))
-
-//----------------------------------------------------------------------------
-// Math
-//----------------------------------------------------------------------------
-
-// int art_portable_{div,rem}_int(int a, int b)
-_EVAL_DEF_INTRINSICS_FUNC(DivInt,
- art_portable_div_int,
- kAttrReadNone | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(RemInt,
- art_portable_rem_int,
- kAttrReadNone | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// long art_portable_{div,rem}_long(long a, long b)
-_EVAL_DEF_INTRINSICS_FUNC(DivLong,
- art_portable_div_long,
- kAttrReadNone | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG2(kInt64Ty, kInt64Ty))
-
-_EVAL_DEF_INTRINSICS_FUNC(RemLong,
- art_portable_rem_long,
- kAttrReadNone | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG2(kInt64Ty, kInt64Ty))
-
-// int64_t art_portable_d2l(double f)
-_EVAL_DEF_INTRINSICS_FUNC(D2L,
- art_portable_d2l,
- kAttrReadNone | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG1(kDoubleTy))
-
-// int32_t art_portable_d2l(double f)
-_EVAL_DEF_INTRINSICS_FUNC(D2I,
- art_portable_d2i,
- kAttrReadNone | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kDoubleTy))
-
-// int64_t art_portable_f2l(float f)
-_EVAL_DEF_INTRINSICS_FUNC(F2L,
- art_portable_f2l,
- kAttrReadNone | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG1(kFloatTy))
-
-// int32_t art_portable_f2i(float f)
-_EVAL_DEF_INTRINSICS_FUNC(F2I,
- art_portable_f2i,
- kAttrReadNone | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kFloatTy))
-
-//----------------------------------------------------------------------------
-// sput intrinsics to assist MIR to Greenland_ir conversion.
-// "HL" versions - will be deprecated when fast/slow path handling done
-// in the common frontend.
-//----------------------------------------------------------------------------
-
-// void sput_hl(int field_idx, int val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSput,
- art_portable_hl_sput,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// void sput_hl_object(int field_idx, object* val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputObject,
- art_portable_hl_sput_object,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
-
-// void sput_hl_boolean(int field_idx, kInt1Ty)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputBoolean,
- art_portable_hl_sput_boolean,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// void sput_hl_byte(int field_idx, int val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputByte,
- art_portable_hl_sput_byte,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// void sput_hl_char(int field_idx, kInt16Ty val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputChar,
- art_portable_hl_sput_char,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// void sput_hl_short(int field_idx, int val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputShort,
- art_portable_hl_sput_short,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt32Ty))
-
-// void sput_hl_wide(int field_idx, long val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputWide,
- art_portable_hl_sput_wide,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kInt64Ty))
-
-// void sput_hl_double(int field_idx, double val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputDouble,
- art_portable_hl_sput_double,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kDoubleTy))
-
-// void sput_hl_float(int field_idx, float val)
-_EVAL_DEF_INTRINSICS_FUNC(HLSputFloat,
- art_portable_hl_sput_float,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kFloatTy))
-
-//----------------------------------------------------------------------------
-// sget intrinsics to assist MIR to Greenland_ir conversion.
-// "HL" versions - will be deprecated when fast/slow path handling done
-// in the common frontend.
-//----------------------------------------------------------------------------
-
-// int sget_hl(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSget,
- art_portable_hl_sget,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// object* sget_hl_object(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetObject,
- art_portable_hl_sget_object,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32Ty))
-
-// boolean sget_hl_boolean(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetBoolean,
- art_portable_hl_sget_boolean,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// byte sget_hl_byte(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetByte,
- art_portable_hl_sget_byte,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// char sget_hl_char(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetChar,
- art_portable_hl_sget_char,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// char sget_hl_short(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetShort,
- art_portable_hl_sget_short,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// char sget_hl_wide(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetWide,
- art_portable_hl_sget_wide,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// char sget_hl_double(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetDouble,
- art_portable_hl_sget_double,
- kAttrReadOnly | kAttrNoThrow,
- kDoubleTy,
- _EXPAND_ARG1(kInt32Ty))
-
-// char sget_hl_float(int field_idx)
-_EVAL_DEF_INTRINSICS_FUNC(HLSgetFloat,
- art_portable_hl_sget_float,
- kAttrReadOnly | kAttrNoThrow,
- kFloatTy,
- _EXPAND_ARG1(kInt32Ty))
-//----------------------------------------------------------------------------
-// Monitor enter/exit
-//----------------------------------------------------------------------------
-// uint32_t art_portable_monitor_enter(int optFlags, JavaObject* obj)
-_EVAL_DEF_INTRINSICS_FUNC(MonitorEnter,
- art_portable_monitor_enter,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
-
-// uint32_t art_portable_monitor_exit(int optFlags, JavaObject* obj)
-_EVAL_DEF_INTRINSICS_FUNC(MonitorExit,
- art_portable_monitor_exit,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32Ty, kJavaObjectTy))
-
-//----------------------------------------------------------------------------
-// Shadow Frame
-//----------------------------------------------------------------------------
-
-// void art_portable_alloca_shadow_frame(int num_entry)
-_EVAL_DEF_INTRINSICS_FUNC(AllocaShadowFrame,
- art_portable_alloca_shadow_frame,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-// void art_portable_set_vreg(int entry_idx, ...)
-_EVAL_DEF_INTRINSICS_FUNC(SetVReg,
- art_portable_set_vreg,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG2(kInt32ConstantTy, kVarArgTy))
-
-// void art_portable_pop_shadow_frame()
-_EVAL_DEF_INTRINSICS_FUNC(PopShadowFrame,
- art_portable_pop_shadow_frame,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG0())
-
-// void art_portable_update_dex_pc(uint32_t dex_pc)
-_EVAL_DEF_INTRINSICS_FUNC(UpdateDexPC,
- art_portable_update_dex_pc,
- kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG1(kInt32ConstantTy))
-
-//----------------------------------------------------------------------------
-// FP Comparison
-//----------------------------------------------------------------------------
-// int cmpl_float(float, float)
-_EVAL_DEF_INTRINSICS_FUNC(CmplFloat,
- art_portable_cmpl_float,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kFloatTy, kFloatTy))
-
-// int cmpg_float(float, float)
-_EVAL_DEF_INTRINSICS_FUNC(CmpgFloat,
- art_portable_cmpg_float,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kFloatTy, kFloatTy))
-
-// int cmpl_double(double, double)
-_EVAL_DEF_INTRINSICS_FUNC(CmplDouble,
- art_portable_cmpl_double,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kDoubleTy, kDoubleTy))
-
-// int cmpg_double(double, double)
-_EVAL_DEF_INTRINSICS_FUNC(CmpgDouble,
- art_portable_cmpg_double,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kDoubleTy, kDoubleTy))
-
-//----------------------------------------------------------------------------
-// Long Comparison
-//----------------------------------------------------------------------------
-// int cmp_long(long, long)
-_EVAL_DEF_INTRINSICS_FUNC(CmpLong,
- art_portable_cmp_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt64Ty, kInt64Ty))
-
-//----------------------------------------------------------------------------
-// Const intrinsics to assist MIR to Greenland_ir conversion. Should not materialize
-// For simplicity, all use integer input
-//----------------------------------------------------------------------------
-// int const_int(int)
-_EVAL_DEF_INTRINSICS_FUNC(ConstInt,
- art_portable_const_int,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// JavaObject* const_obj(int)
-_EVAL_DEF_INTRINSICS_FUNC(ConstObj,
- art_portable_const_obj,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kInt32Ty))
-
-// long const_long(long)
-_EVAL_DEF_INTRINSICS_FUNC(ConstLong,
- art_portable_const_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG1(kInt64Ty))
-
-// float const_float(int)
-_EVAL_DEF_INTRINSICS_FUNC(ConstFloat,
- art_portable_const_Float,
- kAttrReadOnly | kAttrNoThrow,
- kFloatTy,
- _EXPAND_ARG1(kInt32Ty))
-
-// double const_double(long)
-_EVAL_DEF_INTRINSICS_FUNC(ConstDouble,
- art_portable_const_Double,
- kAttrReadOnly | kAttrNoThrow,
- kDoubleTy,
- _EXPAND_ARG1(kInt64Ty))
-
-
-//----------------------------------------------------------------------------
-// Copy intrinsics to assist MIR to Greenland_ir conversion. Should not materialize
-//----------------------------------------------------------------------------
-
-// void method_info(void)
-_EVAL_DEF_INTRINSICS_FUNC(MethodInfo,
- art_portable_method_info,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG0())
-
-// int copy_int(int)
-_EVAL_DEF_INTRINSICS_FUNC(CopyInt,
- art_portable_copy_int,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// JavaObject* copy_obj(obj)
-_EVAL_DEF_INTRINSICS_FUNC(CopyObj,
- art_portable_copy_obj,
- kAttrReadOnly | kAttrNoThrow,
- kJavaObjectTy,
- _EXPAND_ARG1(kJavaObjectTy))
-
-// long copy_long(long)
-_EVAL_DEF_INTRINSICS_FUNC(CopyLong,
- art_portable_copy_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG1(kInt64Ty))
-
-// float copy_float(float)
-_EVAL_DEF_INTRINSICS_FUNC(CopyFloat,
- art_portable_copy_Float,
- kAttrReadOnly | kAttrNoThrow,
- kFloatTy,
- _EXPAND_ARG1(kFloatTy))
-
-// double copy_double(double)
-_EVAL_DEF_INTRINSICS_FUNC(CopyDouble,
- art_portable_copy_Double,
- kAttrReadOnly | kAttrNoThrow,
- kDoubleTy,
- _EXPAND_ARG1(kDoubleTy))
-
-//----------------------------------------------------------------------------
-// Shift intrinsics. Shift semantics for Dalvik are a bit different than
-// the llvm shift operators. For 32-bit shifts, the shift count is constrained
-// to the range of 0..31, while for 64-bit shifts we limit to 0..63.
-// Further, the shift count for Long shifts in Dalvik is 32 bits, while
-// llvm requires a 64-bit shift count. For GBC, we represent shifts as an
-// intrinsic to allow most efficient target-dependent lowering.
-//----------------------------------------------------------------------------
-// long shl_long(long,int)
-_EVAL_DEF_INTRINSICS_FUNC(SHLLong,
- art_portable_shl_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG2(kInt64Ty,kInt32Ty))
-// long shr_long(long,int)
-_EVAL_DEF_INTRINSICS_FUNC(SHRLong,
- art_portable_shr_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG2(kInt64Ty,kInt32Ty))
-// long ushr_long(long,int)
-_EVAL_DEF_INTRINSICS_FUNC(USHRLong,
- art_portable_ushl_long,
- kAttrReadOnly | kAttrNoThrow,
- kInt64Ty,
- _EXPAND_ARG2(kInt64Ty,kInt32Ty))
-// int shl_int(int,int)
-_EVAL_DEF_INTRINSICS_FUNC(SHLInt,
- art_portable_shl_int,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty,kInt32Ty))
-// long shr_int(int,int)
-_EVAL_DEF_INTRINSICS_FUNC(SHRInt,
- art_portable_shr_int,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty,kInt32Ty))
-// int ushr_long(int,int)
-_EVAL_DEF_INTRINSICS_FUNC(USHRInt,
- art_portable_ushl_int,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG2(kInt32Ty,kInt32Ty))
-//----------------------------------------------------------------------------
-// Conversion instrinsics. Note: these should eventually be removed. We
-// can express these directly in bitcode, but by using intrinsics the
-// Quick compiler can be more efficient. Some extra optimization infrastructure
-// will have to be developed to undo the bitcode verbosity when these are
-// done inline.
-//----------------------------------------------------------------------------
-// int int_to_byte(int)
-_EVAL_DEF_INTRINSICS_FUNC(IntToByte,
- art_portable_int_to_byte,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// int int_to_char(int)
-_EVAL_DEF_INTRINSICS_FUNC(IntToChar,
- art_portable_int_to_char,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-// int int_to_short(int)
-_EVAL_DEF_INTRINSICS_FUNC(IntToShort,
- art_portable_int_to_short,
- kAttrReadOnly | kAttrNoThrow,
- kInt32Ty,
- _EXPAND_ARG1(kInt32Ty))
-
-//----------------------------------------------------------------------------
-// Memory barrier
-//----------------------------------------------------------------------------
-// void constructor_barrier()
-_EVAL_DEF_INTRINSICS_FUNC(ConstructorBarrier,
- art_portable_constructor_barrier,
- kAttrReadOnly | kAttrNoThrow,
- kVoidTy,
- _EXPAND_ARG0())
-
-// Clean up all internal used macros
-#undef _EXPAND_ARG0
-#undef _EXPAND_ARG1
-#undef _EXPAND_ARG2
-#undef _EXPAND_ARG3
-#undef _EXPAND_ARG4
-#undef _EXPAND_ARG5
-
-#undef _JTYPE_OF_kInt1Ty_UNDER_kArray
-#undef _JTYPE_OF_kInt8Ty_UNDER_kArray
-#undef _JTYPE_OF_kInt16Ty_UNDER_kArray
-#undef _JTYPE_OF_kInt32Ty_UNDER_kArray
-#undef _JTYPE_OF_kInt64Ty_UNDER_kArray
-#undef _JTYPE_OF_kJavaObjectTy_UNDER_kArray
-
-#undef _JTYPE_OF_kInt1Ty_UNDER_kField
-#undef _JTYPE_OF_kInt8Ty_UNDER_kField
-#undef _JTYPE_OF_kInt16Ty_UNDER_kField
-#undef _JTYPE_OF_kInt32Ty_UNDER_kField
-#undef _JTYPE_OF_kInt64Ty_UNDER_kField
-#undef _JTYPE_OF_kJavaObjectTy_UNDER_kField
-
-#undef DEF_INTRINSICS_FUNC
diff --git a/compiler/llvm/intrinsic_helper.cc b/compiler/llvm/intrinsic_helper.cc
deleted file mode 100644
index e5e7998f6c..0000000000
--- a/compiler/llvm/intrinsic_helper.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "intrinsic_helper.h"
-
-#include "ir_builder.h"
-
-#include <llvm/IR/Attributes.h>
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/IRBuilder.h>
-#include <llvm/IR/Intrinsics.h>
-
-namespace art {
-namespace llvm {
-
-const IntrinsicHelper::IntrinsicInfo IntrinsicHelper::Info[] = {
-#define DEF_INTRINSICS_FUNC(_, NAME, ATTR, RET_TYPE, ARG1_TYPE, ARG2_TYPE, \
- ARG3_TYPE, ARG4_TYPE, \
- ARG5_TYPE) \
- { #NAME, ATTR, RET_TYPE, { ARG1_TYPE, ARG2_TYPE, \
- ARG3_TYPE, ARG4_TYPE, \
- ARG5_TYPE} },
-#include "intrinsic_func_list.def"
-};
-
-static ::llvm::Type* GetLLVMTypeOfIntrinsicValType(IRBuilder& irb,
- IntrinsicHelper::IntrinsicValType type) {
- switch (type) {
- case IntrinsicHelper::kVoidTy: {
- return irb.getVoidTy();
- }
- case IntrinsicHelper::kJavaObjectTy: {
- return irb.getJObjectTy();
- }
- case IntrinsicHelper::kJavaMethodTy: {
- return irb.getJMethodTy();
- }
- case IntrinsicHelper::kJavaThreadTy: {
- return irb.getJThreadTy();
- }
- case IntrinsicHelper::kInt1Ty:
- case IntrinsicHelper::kInt1ConstantTy: {
- return irb.getInt1Ty();
- }
- case IntrinsicHelper::kInt8Ty:
- case IntrinsicHelper::kInt8ConstantTy: {
- return irb.getInt8Ty();
- }
- case IntrinsicHelper::kInt16Ty:
- case IntrinsicHelper::kInt16ConstantTy: {
- return irb.getInt16Ty();
- }
- case IntrinsicHelper::kInt32Ty:
- case IntrinsicHelper::kInt32ConstantTy: {
- return irb.getInt32Ty();
- }
- case IntrinsicHelper::kInt64Ty:
- case IntrinsicHelper::kInt64ConstantTy: {
- return irb.getInt64Ty();
- }
- case IntrinsicHelper::kFloatTy:
- case IntrinsicHelper::kFloatConstantTy: {
- return irb.getFloatTy();
- }
- case IntrinsicHelper::kDoubleTy:
- case IntrinsicHelper::kDoubleConstantTy: {
- return irb.getDoubleTy();
- }
- case IntrinsicHelper::kNone:
- case IntrinsicHelper::kVarArgTy:
- default: {
- LOG(FATAL) << "Invalid intrinsic type " << type << "to get LLVM type!";
- return NULL;
- }
- }
- // unreachable
-}
-
-IntrinsicHelper::IntrinsicHelper(::llvm::LLVMContext& context,
- ::llvm::Module& module) {
- IRBuilder irb(context, module, *this);
-
- ::memset(intrinsic_funcs_, 0, sizeof(intrinsic_funcs_));
-
- // This loop does the following things:
- // 1. Introduce the intrinsic function into the module
- // 2. Add "nocapture" and "noalias" attribute to the arguments in all
- // intrinsics functions.
- // 3. Initialize intrinsic_funcs_map_.
- for (unsigned i = 0; i < MaxIntrinsicId; i++) {
- IntrinsicId id = static_cast<IntrinsicId>(i);
- const IntrinsicInfo& info = Info[i];
-
- // Parse and construct the argument type from IntrinsicInfo
- ::llvm::Type* arg_type[kIntrinsicMaxArgc];
- unsigned num_args = 0;
- bool is_var_arg = false;
- for (unsigned arg_iter = 0; arg_iter < kIntrinsicMaxArgc; arg_iter++) {
- IntrinsicValType type = info.arg_type_[arg_iter];
-
- if (type == kNone) {
- break;
- } else if (type == kVarArgTy) {
- // Variable argument type must be the last argument
- is_var_arg = true;
- break;
- }
-
- arg_type[num_args++] = GetLLVMTypeOfIntrinsicValType(irb, type);
- }
-
- // Construct the function type
- ::llvm::Type* ret_type =
- GetLLVMTypeOfIntrinsicValType(irb, info.ret_val_type_);
-
- ::llvm::FunctionType* type =
- ::llvm::FunctionType::get(ret_type,
- ::llvm::ArrayRef< ::llvm::Type*>(arg_type, num_args),
- is_var_arg);
-
- // Declare the function
- ::llvm::Function *fn = ::llvm::Function::Create(type,
- ::llvm::Function::ExternalLinkage,
- info.name_, &module);
-
- if (info.attr_ & kAttrReadOnly) {
- fn->setOnlyReadsMemory();
- }
- if (info.attr_ & kAttrReadNone) {
- fn->setDoesNotAccessMemory();
- }
- // None of the intrinsics throws exception
- fn->setDoesNotThrow();
-
- intrinsic_funcs_[id] = fn;
-
- DCHECK_NE(fn, static_cast< ::llvm::Function*>(NULL)) << "Intrinsic `"
- << GetName(id) << "' was not defined!";
-
- // Add "noalias" and "nocapture" attribute to all arguments of pointer type
- for (::llvm::Function::arg_iterator arg_iter = fn->arg_begin(),
- arg_end = fn->arg_end(); arg_iter != arg_end; arg_iter++) {
- if (arg_iter->getType()->isPointerTy()) {
- std::vector< ::llvm::Attribute::AttrKind> attributes;
- attributes.push_back(::llvm::Attribute::NoCapture);
- attributes.push_back(::llvm::Attribute::NoAlias);
- ::llvm::AttributeSet attribute_set = ::llvm::AttributeSet::get(fn->getContext(),
- arg_iter->getArgNo(),
- attributes);
- arg_iter->addAttr(attribute_set);
- }
- }
-
- // Insert the newly created intrinsic to intrinsic_funcs_map_
- if (!intrinsic_funcs_map_.insert(std::make_pair(fn, id)).second) {
- LOG(FATAL) << "Duplicate entry in intrinsic functions map?";
- }
- }
-
- return;
-}
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/intrinsic_helper.h b/compiler/llvm/intrinsic_helper.h
deleted file mode 100644
index 657db402fa..0000000000
--- a/compiler/llvm/intrinsic_helper.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_INTRINSIC_HELPER_H_
-#define ART_COMPILER_LLVM_INTRINSIC_HELPER_H_
-
-#include "base/logging.h"
-
-#include <llvm/ADT/DenseMap.h>
-
-namespace llvm {
- class Function;
- class FunctionType;
- class LLVMContext;
- class Module;
-} // namespace llvm
-
-namespace art {
-namespace llvm {
-
-class IRBuilder;
-
-class IntrinsicHelper {
- public:
- enum IntrinsicId {
-#define DEF_INTRINSICS_FUNC(ID, ...) ID,
-#include "intrinsic_func_list.def"
- MaxIntrinsicId,
-
- // Pseudo-intrinsics Id
- UnknownId
- };
-
- enum IntrinsicAttribute {
- kAttrNone = 0,
-
- // Intrinsic that neither modified the memory state nor refer to the global
- // state
- kAttrReadNone = 1 << 0,
-
- // Intrinsic that doesn't modify the memory state. Note that one should set
- // this flag carefully when the intrinsic may throw exception. Since the
- // thread state is implicitly modified when an exception is thrown.
- kAttrReadOnly = 1 << 1,
-
- // Note that intrinsic without kAttrNoThrow and kAttrDoThrow set means that
- // intrinsic generates exception in some cases
-
- // Intrinsic that never generates exception
- kAttrNoThrow = 1 << 2,
- // Intrinsic that always generate exception
- kAttrDoThrow = 1 << 3,
- };
-
- enum IntrinsicValType {
- kNone,
-
- kVoidTy,
-
- kJavaObjectTy,
- kJavaMethodTy,
- kJavaThreadTy,
-
- kInt1Ty,
- kInt8Ty,
- kInt16Ty,
- kInt32Ty,
- kInt64Ty,
- kFloatTy,
- kDoubleTy,
-
- kInt1ConstantTy,
- kInt8ConstantTy,
- kInt16ConstantTy,
- kInt32ConstantTy,
- kInt64ConstantTy,
- kFloatConstantTy,
- kDoubleConstantTy,
-
- kVarArgTy,
- };
-
- enum {
- kIntrinsicMaxArgc = 5
- };
-
- typedef struct IntrinsicInfo {
- const char* name_;
- unsigned attr_;
- IntrinsicValType ret_val_type_;
- IntrinsicValType arg_type_[kIntrinsicMaxArgc];
- } IntrinsicInfo;
-
- private:
- static const IntrinsicInfo Info[];
-
- public:
- static const IntrinsicInfo& GetInfo(IntrinsicId id) {
- DCHECK(id >= 0 && id < MaxIntrinsicId) << "Unknown ART intrinsics ID: "
- << id;
- return Info[id];
- }
-
- static const char* GetName(IntrinsicId id) {
- return (id <= MaxIntrinsicId) ? GetInfo(id).name_ : "InvalidIntrinsic";
- }
-
- static unsigned GetAttr(IntrinsicId id) {
- return GetInfo(id).attr_;
- }
-
- public:
- IntrinsicHelper(::llvm::LLVMContext& context, ::llvm::Module& module);
-
- ::llvm::Function* GetIntrinsicFunction(IntrinsicId id) {
- DCHECK(id >= 0 && id < MaxIntrinsicId) << "Unknown ART intrinsics ID: "
- << id;
- return intrinsic_funcs_[id];
- }
-
- IntrinsicId GetIntrinsicId(const ::llvm::Function* func) const {
- ::llvm::DenseMap<const ::llvm::Function*, IntrinsicId>::const_iterator
- i = intrinsic_funcs_map_.find(func);
- if (i == intrinsic_funcs_map_.end()) {
- return UnknownId;
- } else {
- return i->second;
- }
- }
-
- private:
- // FIXME: "+1" is to workaround the GCC bugs:
- // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
- // Remove this when uses newer GCC (> 4.4.3)
- ::llvm::Function* intrinsic_funcs_[MaxIntrinsicId + 1];
-
- // Map a llvm::Function to its intrinsic id
- ::llvm::DenseMap<const ::llvm::Function*, IntrinsicId> intrinsic_funcs_map_;
-};
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_INTRINSIC_HELPER_H_
diff --git a/compiler/llvm/ir_builder.cc b/compiler/llvm/ir_builder.cc
deleted file mode 100644
index 9644ebd976..0000000000
--- a/compiler/llvm/ir_builder.cc
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ir_builder.h"
-
-#include "base/stringprintf.h"
-
-#include <llvm/IR/Module.h>
-
-namespace art {
-namespace llvm {
-
-
-//----------------------------------------------------------------------------
-// General
-//----------------------------------------------------------------------------
-
-IRBuilder::IRBuilder(::llvm::LLVMContext& context, ::llvm::Module& module,
- IntrinsicHelper& intrinsic_helper)
- : LLVMIRBuilder(context), module_(&module), mdb_(context), java_object_type_(NULL),
- java_method_type_(NULL), java_thread_type_(NULL), intrinsic_helper_(intrinsic_helper) {
- // Get java object type from module
- ::llvm::Type* jobject_struct_type = module.getTypeByName("JavaObject");
- CHECK(jobject_struct_type != NULL);
- java_object_type_ = jobject_struct_type->getPointerTo();
-
- // If type of Method is not explicitly defined in the module, use JavaObject*
- ::llvm::Type* type = module.getTypeByName("Method");
- if (type != NULL) {
- java_method_type_ = type->getPointerTo();
- } else {
- java_method_type_ = java_object_type_;
- }
-
- // If type of Thread is not explicitly defined in the module, use JavaObject*
- type = module.getTypeByName("Thread");
- if (type != NULL) {
- java_thread_type_ = type->getPointerTo();
- } else {
- java_thread_type_ = java_object_type_;
- }
-
- // Create JEnv* type
- ::llvm::Type* jenv_struct_type = ::llvm::StructType::create(context, "JEnv");
- jenv_type_ = jenv_struct_type->getPointerTo();
-
- // Get Art shadow frame struct type from module
- art_frame_type_ = module.getTypeByName("ShadowFrame");
- CHECK(art_frame_type_ != NULL);
-
- runtime_support_ = NULL;
-}
-
-
-//----------------------------------------------------------------------------
-// Type Helper Function
-//----------------------------------------------------------------------------
-
-::llvm::Type* IRBuilder::getJType(JType jty) {
- switch (jty) {
- case kVoid:
- return getJVoidTy();
-
- case kBoolean:
- return getJBooleanTy();
-
- case kByte:
- return getJByteTy();
-
- case kChar:
- return getJCharTy();
-
- case kShort:
- return getJShortTy();
-
- case kInt:
- return getJIntTy();
-
- case kLong:
- return getJLongTy();
-
- case kFloat:
- return getJFloatTy();
-
- case kDouble:
- return getJDoubleTy();
-
- case kObject:
- return getJObjectTy();
-
- default:
- LOG(FATAL) << "Unknown java type: " << jty;
- return NULL;
- }
-}
-
-::llvm::StructType* IRBuilder::getShadowFrameTy(uint32_t vreg_size) {
- std::string name(StringPrintf("ShadowFrame%u", vreg_size));
-
- // Try to find the existing struct type definition
- if (::llvm::Type* type = module_->getTypeByName(name)) {
- CHECK(::llvm::isa< ::llvm::StructType>(type));
- return static_cast< ::llvm::StructType*>(type);
- }
-
- // Create new struct type definition
- ::llvm::Type* elem_types[] = {
- art_frame_type_,
- ::llvm::ArrayType::get(getInt32Ty(), vreg_size),
- };
-
- return ::llvm::StructType::create(elem_types, name);
-}
-
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
deleted file mode 100644
index 990ba02d3d..0000000000
--- a/compiler/llvm/ir_builder.h
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_IR_BUILDER_H_
-#define ART_COMPILER_LLVM_IR_BUILDER_H_
-
-#include "backend_types.h"
-#include "dex/compiler_enums.h"
-#include "intrinsic_helper.h"
-#include "md_builder.h"
-#include "runtime_support_builder.h"
-#include "runtime_support_llvm_func.h"
-
-#include <llvm/IR/Constants.h>
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/IRBuilder.h>
-#include <llvm/IR/LLVMContext.h>
-#include <llvm/IR/Type.h>
-#include <llvm/Support/NoFolder.h>
-
-#include <stdint.h>
-
-
-namespace art {
-namespace llvm {
-
-class InserterWithDexOffset : public ::llvm::IRBuilderDefaultInserter<true> {
- public:
- InserterWithDexOffset() : node_(NULL) {}
-
- void InsertHelper(::llvm::Instruction *I, const ::llvm::Twine &Name,
- ::llvm::BasicBlock *BB,
- ::llvm::BasicBlock::iterator InsertPt) const {
- ::llvm::IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
- if (node_ != NULL) {
- I->setMetadata("DexOff", node_);
- }
- }
-
- void SetDexOffset(::llvm::MDNode* node) {
- node_ = node;
- }
- private:
- ::llvm::MDNode* node_;
-};
-
-typedef ::llvm::IRBuilder<true, ::llvm::ConstantFolder, InserterWithDexOffset> LLVMIRBuilder;
-// NOTE: Here we define our own LLVMIRBuilder type alias, so that we can
-// switch "preserveNames" template parameter easily.
-
-
-class IRBuilder : public LLVMIRBuilder {
- public:
- //--------------------------------------------------------------------------
- // General
- //--------------------------------------------------------------------------
-
- IRBuilder(::llvm::LLVMContext& context, ::llvm::Module& module,
- IntrinsicHelper& intrinsic_helper);
-
-
- //--------------------------------------------------------------------------
- // Extend load & store for TBAA
- //--------------------------------------------------------------------------
-
- ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, ::llvm::MDNode* tbaa_info) {
- ::llvm::LoadInst* inst = LLVMIRBuilder::CreateLoad(ptr);
- inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
- return inst;
- }
-
- ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr, ::llvm::MDNode* tbaa_info) {
- ::llvm::StoreInst* inst = LLVMIRBuilder::CreateStore(val, ptr);
- inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
- return inst;
- }
-
- ::llvm::AtomicCmpXchgInst*
- CreateAtomicCmpXchgInst(::llvm::Value* ptr, ::llvm::Value* cmp, ::llvm::Value* val,
- ::llvm::MDNode* tbaa_info) {
- ::llvm::AtomicCmpXchgInst* inst =
- LLVMIRBuilder::CreateAtomicCmpXchg(ptr, cmp, val, ::llvm::Acquire);
- inst->setMetadata(::llvm::LLVMContext::MD_tbaa, tbaa_info);
- return inst;
- }
-
- //--------------------------------------------------------------------------
- // Extend memory barrier
- //--------------------------------------------------------------------------
- void CreateMemoryBarrier(MemBarrierKind barrier_kind) {
- // TODO: select atomic ordering according to given barrier kind.
- CreateFence(::llvm::SequentiallyConsistent);
- }
-
- //--------------------------------------------------------------------------
- // TBAA
- //--------------------------------------------------------------------------
-
- // TODO: After we design the non-special TBAA info, re-design the TBAA interface.
- ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, TBAASpecialType special_ty) {
- return CreateLoad(ptr, mdb_.GetTBAASpecialType(special_ty));
- }
-
- ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr, TBAASpecialType special_ty) {
- DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
- return CreateStore(val, ptr, mdb_.GetTBAASpecialType(special_ty));
- }
-
- ::llvm::LoadInst* CreateLoad(::llvm::Value* ptr, TBAASpecialType special_ty, JType j_ty) {
- return CreateLoad(ptr, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
- }
-
- ::llvm::StoreInst* CreateStore(::llvm::Value* val, ::llvm::Value* ptr,
- TBAASpecialType special_ty, JType j_ty) {
- DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
- return CreateStore(val, ptr, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
- }
-
- ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Type* type,
- TBAASpecialType special_ty) {
- return LoadFromObjectOffset(object_addr, offset, type, mdb_.GetTBAASpecialType(special_ty));
- }
-
- void StoreToObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Value* new_value,
- TBAASpecialType special_ty) {
- DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
- StoreToObjectOffset(object_addr, offset, new_value, mdb_.GetTBAASpecialType(special_ty));
- }
-
- ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Type* type,
- TBAASpecialType special_ty, JType j_ty) {
- return LoadFromObjectOffset(object_addr, offset, type, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
- }
-
- void StoreToObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Value* new_value,
- TBAASpecialType special_ty, JType j_ty) {
- DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
- StoreToObjectOffset(object_addr, offset, new_value, mdb_.GetTBAAMemoryJType(special_ty, j_ty));
- }
-
- ::llvm::AtomicCmpXchgInst*
- CompareExchangeObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Value* cmp_value,
- ::llvm::Value* new_value,
- TBAASpecialType special_ty) {
- DCHECK_NE(special_ty, kTBAAConstJObject) << "ConstJObject is read only!";
- return CompareExchangeObjectOffset(object_addr, offset, cmp_value, new_value,
- mdb_.GetTBAASpecialType(special_ty));
- }
-
- void SetTBAA(::llvm::Instruction* inst, TBAASpecialType special_ty) {
- inst->setMetadata(::llvm::LLVMContext::MD_tbaa, mdb_.GetTBAASpecialType(special_ty));
- }
-
-
- //--------------------------------------------------------------------------
- // Static Branch Prediction
- //--------------------------------------------------------------------------
-
- // Import the orignal conditional branch
- using LLVMIRBuilder::CreateCondBr;
- ::llvm::BranchInst* CreateCondBr(::llvm::Value *cond,
- ::llvm::BasicBlock* true_bb,
- ::llvm::BasicBlock* false_bb,
- ExpectCond expect) {
- ::llvm::BranchInst* branch_inst = CreateCondBr(cond, true_bb, false_bb);
- if (false) {
- // TODO: http://b/8511695 Restore branch weight metadata
- branch_inst->setMetadata(::llvm::LLVMContext::MD_prof, mdb_.GetBranchWeights(expect));
- }
- return branch_inst;
- }
-
-
- //--------------------------------------------------------------------------
- // Pointer Arithmetic Helper Function
- //--------------------------------------------------------------------------
-
- ::llvm::IntegerType* getPtrEquivIntTy() {
- return getInt32Ty();
- }
-
- size_t getSizeOfPtrEquivInt() {
- return 4;
- }
-
- ::llvm::ConstantInt* getSizeOfPtrEquivIntValue() {
- return getPtrEquivInt(getSizeOfPtrEquivInt());
- }
-
- ::llvm::ConstantInt* getPtrEquivInt(int64_t i) {
- return ::llvm::ConstantInt::get(getPtrEquivIntTy(), i);
- }
-
- ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
- ::llvm::Value* offset,
- ::llvm::PointerType* ret_ty) {
- ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy());
- ::llvm::Value* result_int = CreateAdd(base_int, offset);
- ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty);
-
- return result;
- }
-
- ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
- ::llvm::Value* bs,
- ::llvm::Value* count,
- ::llvm::Value* offset,
- ::llvm::PointerType* ret_ty) {
- ::llvm::Value* block_offset = CreateMul(bs, count);
- ::llvm::Value* total_offset = CreateAdd(block_offset, offset);
-
- return CreatePtrDisp(base, total_offset, ret_ty);
- }
-
- ::llvm::LoadInst* LoadFromObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Type* type,
- ::llvm::MDNode* tbaa_info) {
- // Convert offset to ::llvm::value
- ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
- // Calculate the value's address
- ::llvm::Value* value_addr = CreatePtrDisp(object_addr, llvm_offset, type->getPointerTo());
- // Load
- return CreateLoad(value_addr, tbaa_info);
- }
-
- void StoreToObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Value* new_value,
- ::llvm::MDNode* tbaa_info) {
- // Convert offset to ::llvm::value
- ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
- // Calculate the value's address
- ::llvm::Value* value_addr = CreatePtrDisp(object_addr,
- llvm_offset,
- new_value->getType()->getPointerTo());
- // Store
- CreateStore(new_value, value_addr, tbaa_info);
- }
-
- ::llvm::AtomicCmpXchgInst* CompareExchangeObjectOffset(::llvm::Value* object_addr,
- int64_t offset,
- ::llvm::Value* cmp_value,
- ::llvm::Value* new_value,
- ::llvm::MDNode* tbaa_info) {
- // Convert offset to ::llvm::value
- ::llvm::Value* llvm_offset = getPtrEquivInt(offset);
- // Calculate the value's address
- ::llvm::Value* value_addr = CreatePtrDisp(object_addr,
- llvm_offset,
- new_value->getType()->getPointerTo());
- // Atomic compare and exchange
- return CreateAtomicCmpXchgInst(value_addr, cmp_value, new_value, tbaa_info);
- }
-
-
- //--------------------------------------------------------------------------
- // Runtime Helper Function
- //--------------------------------------------------------------------------
-
- RuntimeSupportBuilder& Runtime() {
- return *runtime_support_;
- }
-
- // TODO: Deprecate
- ::llvm::Function* GetRuntime(runtime_support::RuntimeId rt) {
- return runtime_support_->GetRuntimeSupportFunction(rt);
- }
-
- // TODO: Deprecate
- void SetRuntimeSupport(RuntimeSupportBuilder* runtime_support) {
- // Can only set once. We can't do this on constructor, because RuntimeSupportBuilder needs
- // IRBuilder.
- if (runtime_support_ == NULL && runtime_support != NULL) {
- runtime_support_ = runtime_support;
- }
- }
-
-
- //--------------------------------------------------------------------------
- // Type Helper Function
- //--------------------------------------------------------------------------
-
- ::llvm::Type* getJType(char shorty_jty) {
- return getJType(GetJTypeFromShorty(shorty_jty));
- }
-
- ::llvm::Type* getJType(JType jty);
-
- ::llvm::Type* getJVoidTy() {
- return getVoidTy();
- }
-
- ::llvm::IntegerType* getJBooleanTy() {
- return getInt8Ty();
- }
-
- ::llvm::IntegerType* getJByteTy() {
- return getInt8Ty();
- }
-
- ::llvm::IntegerType* getJCharTy() {
- return getInt16Ty();
- }
-
- ::llvm::IntegerType* getJShortTy() {
- return getInt16Ty();
- }
-
- ::llvm::IntegerType* getJIntTy() {
- return getInt32Ty();
- }
-
- ::llvm::IntegerType* getJLongTy() {
- return getInt64Ty();
- }
-
- ::llvm::Type* getJFloatTy() {
- return getFloatTy();
- }
-
- ::llvm::Type* getJDoubleTy() {
- return getDoubleTy();
- }
-
- ::llvm::PointerType* getJObjectTy() {
- return java_object_type_;
- }
-
- ::llvm::PointerType* getJMethodTy() {
- return java_method_type_;
- }
-
- ::llvm::PointerType* getJThreadTy() {
- return java_thread_type_;
- }
-
- ::llvm::Type* getArtFrameTy() {
- return art_frame_type_;
- }
-
- ::llvm::PointerType* getJEnvTy() {
- return jenv_type_;
- }
-
- ::llvm::Type* getJValueTy() {
- // NOTE: JValue is an union type, which may contains boolean, byte, char,
- // short, int, long, float, double, Object. However, LLVM itself does
- // not support union type, so we have to return a type with biggest size,
- // then bitcast it before we use it.
- return getJLongTy();
- }
-
- ::llvm::StructType* getShadowFrameTy(uint32_t vreg_size);
-
-
- //--------------------------------------------------------------------------
- // Constant Value Helper Function
- //--------------------------------------------------------------------------
-
- ::llvm::ConstantInt* getJBoolean(bool is_true) {
- return (is_true) ? getTrue() : getFalse();
- }
-
- ::llvm::ConstantInt* getJByte(int8_t i) {
- return ::llvm::ConstantInt::getSigned(getJByteTy(), i);
- }
-
- ::llvm::ConstantInt* getJChar(int16_t i) {
- return ::llvm::ConstantInt::getSigned(getJCharTy(), i);
- }
-
- ::llvm::ConstantInt* getJShort(int16_t i) {
- return ::llvm::ConstantInt::getSigned(getJShortTy(), i);
- }
-
- ::llvm::ConstantInt* getJInt(int32_t i) {
- return ::llvm::ConstantInt::getSigned(getJIntTy(), i);
- }
-
- ::llvm::ConstantInt* getJLong(int64_t i) {
- return ::llvm::ConstantInt::getSigned(getJLongTy(), i);
- }
-
- ::llvm::Constant* getJFloat(float f) {
- return ::llvm::ConstantFP::get(getJFloatTy(), f);
- }
-
- ::llvm::Constant* getJDouble(double d) {
- return ::llvm::ConstantFP::get(getJDoubleTy(), d);
- }
-
- ::llvm::ConstantPointerNull* getJNull() {
- return ::llvm::ConstantPointerNull::get(getJObjectTy());
- }
-
- ::llvm::Constant* getJZero(char shorty_jty) {
- return getJZero(GetJTypeFromShorty(shorty_jty));
- }
-
- ::llvm::Constant* getJZero(JType jty) {
- switch (jty) {
- case kVoid:
- LOG(FATAL) << "Zero is not a value of void type";
- return NULL;
-
- case kBoolean:
- return getJBoolean(false);
-
- case kByte:
- return getJByte(0);
-
- case kChar:
- return getJChar(0);
-
- case kShort:
- return getJShort(0);
-
- case kInt:
- return getJInt(0);
-
- case kLong:
- return getJLong(0);
-
- case kFloat:
- return getJFloat(0.0f);
-
- case kDouble:
- return getJDouble(0.0);
-
- case kObject:
- return getJNull();
-
- default:
- LOG(FATAL) << "Unknown java type: " << jty;
- return NULL;
- }
- }
-
-
- private:
- ::llvm::Module* module_;
-
- MDBuilder mdb_;
-
- ::llvm::PointerType* java_object_type_;
- ::llvm::PointerType* java_method_type_;
- ::llvm::PointerType* java_thread_type_;
-
- ::llvm::PointerType* jenv_type_;
-
- ::llvm::StructType* art_frame_type_;
-
- RuntimeSupportBuilder* runtime_support_;
-
- IntrinsicHelper& intrinsic_helper_;
-};
-
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_IR_BUILDER_H_
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
deleted file mode 100644
index 741c2d7748..0000000000
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// TODO: TargetLibraryInfo is included before sys/... because on Android bionic does #define tricks like:
-//
-// #define stat64 stat
-// #define fstat64 fstat
-// #define lstat64 lstat
-//
-// which causes grief. bionic probably should not do that.
-#include <llvm/Target/TargetLibraryInfo.h>
-
-#include "llvm_compilation_unit.h"
-
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <string>
-
-#include <llvm/ADT/OwningPtr.h>
-#include <llvm/ADT/StringSet.h>
-#include <llvm/ADT/Triple.h>
-#include <llvm/Analysis/CallGraph.h>
-#include <llvm/Analysis/CallGraphSCCPass.h>
-#include <llvm/Analysis/Dominators.h>
-#include <llvm/Analysis/LoopInfo.h>
-#include <llvm/Analysis/LoopPass.h>
-#include <llvm/Analysis/RegionPass.h>
-#include <llvm/Analysis/ScalarEvolution.h>
-#include <llvm/Analysis/Verifier.h>
-#include <llvm/Assembly/PrintModulePass.h>
-#include <llvm/Bitcode/ReaderWriter.h>
-#include <llvm/CodeGen/MachineFrameInfo.h>
-#include <llvm/CodeGen/MachineFunction.h>
-#include <llvm/CodeGen/MachineFunctionPass.h>
-#include <llvm/DebugInfo.h>
-#include <llvm/IR/DataLayout.h>
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/LLVMContext.h>
-#include <llvm/IR/Module.h>
-#include <llvm/Object/ObjectFile.h>
-#include <llvm/PassManager.h>
-#include <llvm/Support/Debug.h>
-#include <llvm/Support/ELF.h>
-#include <llvm/Support/FormattedStream.h>
-#include <llvm/Support/ManagedStatic.h>
-#include <llvm/Support/MemoryBuffer.h>
-#include <llvm/Support/PassNameParser.h>
-#include <llvm/Support/PluginLoader.h>
-#include <llvm/Support/PrettyStackTrace.h>
-#include <llvm/Support/Signals.h>
-#include <llvm/Support/SystemUtils.h>
-#include <llvm/Support/TargetRegistry.h>
-#include <llvm/Support/TargetSelect.h>
-#include <llvm/Support/ToolOutputFile.h>
-#include <llvm/Support/raw_ostream.h>
-#include <llvm/Support/system_error.h>
-#include <llvm/Target/TargetMachine.h>
-#include <llvm/Transforms/IPO.h>
-#include <llvm/Transforms/IPO/PassManagerBuilder.h>
-#include <llvm/Transforms/Scalar.h>
-
-#include "base/logging.h"
-#include "base/unix_file/fd_file.h"
-#include "compiled_method.h"
-#include "compiler_llvm.h"
-#include "instruction_set.h"
-#include "ir_builder.h"
-#include "os.h"
-#include "runtime_support_builder_arm.h"
-#include "runtime_support_builder_x86.h"
-#include "utils_llvm.h"
-
-namespace art {
-namespace llvm {
-
-::llvm::FunctionPass*
-CreateGBCExpanderPass(const IntrinsicHelper& intrinsic_helper, IRBuilder& irb,
- CompilerDriver* compiler, const DexCompilationUnit* dex_compilation_unit);
-
-::llvm::Module* makeLLVMModuleContents(::llvm::Module* module);
-
-
-LlvmCompilationUnit::LlvmCompilationUnit(const CompilerLLVM* compiler_llvm, size_t cunit_id)
- : compiler_llvm_(compiler_llvm), cunit_id_(cunit_id) {
- driver_ = NULL;
- dex_compilation_unit_ = NULL;
- llvm_info_.reset(new LLVMInfo());
- context_.reset(llvm_info_->GetLLVMContext());
- module_ = llvm_info_->GetLLVMModule();
-
- // Include the runtime function declaration
- makeLLVMModuleContents(module_);
-
-
- intrinsic_helper_.reset(new IntrinsicHelper(*context_, *module_));
-
- // Create IRBuilder
- irb_.reset(new IRBuilder(*context_, *module_, *intrinsic_helper_));
-
- // We always need a switch case, so just use a normal function.
- switch (GetInstructionSet()) {
- default:
- runtime_support_.reset(new RuntimeSupportBuilder(*context_, *module_, *irb_));
- break;
- case kThumb2:
- case kArm:
- runtime_support_.reset(new RuntimeSupportBuilderARM(*context_, *module_, *irb_));
- break;
- case kX86:
- runtime_support_.reset(new RuntimeSupportBuilderX86(*context_, *module_, *irb_));
- break;
- }
-
- irb_->SetRuntimeSupport(runtime_support_.get());
-}
-
-
-LlvmCompilationUnit::~LlvmCompilationUnit() {
- ::llvm::LLVMContext* llvm_context = context_.release(); // Managed by llvm_info_
- CHECK(llvm_context != NULL);
-}
-
-
-InstructionSet LlvmCompilationUnit::GetInstructionSet() const {
- return compiler_llvm_->GetInstructionSet();
-}
-
-
-static std::string DumpDirectory() {
- if (kIsTargetBuild) {
- return GetDalvikCacheOrDie("llvm-dump");
- }
- return "/tmp";
-}
-
-void LlvmCompilationUnit::DumpBitcodeToFile() {
- std::string bitcode;
- DumpBitcodeToString(bitcode);
- std::string filename(StringPrintf("%s/Art%zu.bc", DumpDirectory().c_str(), cunit_id_));
- std::unique_ptr<File> output(OS::CreateEmptyFile(filename.c_str()));
- output->WriteFully(bitcode.data(), bitcode.size());
- LOG(INFO) << ".bc file written successfully: " << filename;
-}
-
-void LlvmCompilationUnit::DumpBitcodeToString(std::string& str_buffer) {
- ::llvm::raw_string_ostream str_os(str_buffer);
- ::llvm::WriteBitcodeToFile(module_, str_os);
-}
-
-bool LlvmCompilationUnit::Materialize() {
- const bool kDumpBitcode = false;
- if (kDumpBitcode) {
- // Dump the bitcode for debugging
- DumpBitcodeToFile();
- }
-
- // Compile and prelink ::llvm::Module
- if (!MaterializeToString(elf_object_)) {
- LOG(ERROR) << "Failed to materialize compilation unit " << cunit_id_;
- return false;
- }
-
- const bool kDumpELF = false;
- if (kDumpELF) {
- // Dump the ELF image for debugging
- std::string filename(StringPrintf("%s/Art%zu.o", DumpDirectory().c_str(), cunit_id_));
- std::unique_ptr<File> output(OS::CreateEmptyFile(filename.c_str()));
- output->WriteFully(elf_object_.data(), elf_object_.size());
- LOG(INFO) << ".o file written successfully: " << filename;
- }
-
- return true;
-}
-
-
-bool LlvmCompilationUnit::MaterializeToString(std::string& str_buffer) {
- ::llvm::raw_string_ostream str_os(str_buffer);
- return MaterializeToRawOStream(str_os);
-}
-
-
-bool LlvmCompilationUnit::MaterializeToRawOStream(::llvm::raw_ostream& out_stream) {
- // Lookup the LLVM target
- std::string target_triple;
- std::string target_cpu;
- std::string target_attr;
- CompilerDriver::InstructionSetToLLVMTarget(GetInstructionSet(), &target_triple, &target_cpu,
- &target_attr);
-
- std::string errmsg;
- const ::llvm::Target* target =
- ::llvm::TargetRegistry::lookupTarget(target_triple, errmsg);
-
- CHECK(target != NULL) << errmsg;
-
- // Target options
- ::llvm::TargetOptions target_options;
- target_options.FloatABIType = ::llvm::FloatABI::Soft;
- target_options.NoFramePointerElim = true;
- target_options.UseSoftFloat = false;
- target_options.EnableFastISel = false;
-
- // Create the ::llvm::TargetMachine
- ::llvm::OwningPtr< ::llvm::TargetMachine> target_machine(
- target->createTargetMachine(target_triple, target_cpu, target_attr, target_options,
- ::llvm::Reloc::Static, ::llvm::CodeModel::Small,
- ::llvm::CodeGenOpt::Aggressive));
-
- CHECK(target_machine.get() != NULL) << "Failed to create target machine";
-
- // Add target data
- const ::llvm::DataLayout* data_layout = target_machine->getDataLayout();
-
- // PassManager for code generation passes
- ::llvm::PassManager pm;
- pm.add(new ::llvm::DataLayout(*data_layout));
-
- // FunctionPassManager for optimization pass
- ::llvm::FunctionPassManager fpm(module_);
- fpm.add(new ::llvm::DataLayout(*data_layout));
-
- if (bitcode_filename_.empty()) {
- // If we don't need write the bitcode to file, add the AddSuspendCheckToLoopLatchPass to the
- // regular FunctionPass.
- fpm.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
- driver_, dex_compilation_unit_));
- } else {
- ::llvm::FunctionPassManager fpm2(module_);
- fpm2.add(CreateGBCExpanderPass(*llvm_info_->GetIntrinsicHelper(), *irb_.get(),
- driver_, dex_compilation_unit_));
- fpm2.doInitialization();
- for (::llvm::Module::iterator F = module_->begin(), E = module_->end();
- F != E; ++F) {
- fpm2.run(*F);
- }
- fpm2.doFinalization();
-
- // Write bitcode to file
- std::string errmsg;
-
- ::llvm::OwningPtr< ::llvm::tool_output_file> out_file(
- new ::llvm::tool_output_file(bitcode_filename_.c_str(), errmsg,
- ::llvm::sys::fs::F_Binary));
-
-
- if (!errmsg.empty()) {
- LOG(ERROR) << "Failed to create bitcode output file: " << errmsg;
- return false;
- }
-
- ::llvm::WriteBitcodeToFile(module_, out_file->os());
- out_file->keep();
- }
-
- // Add optimization pass
- ::llvm::PassManagerBuilder pm_builder;
- // TODO: Use inliner after we can do IPO.
- pm_builder.Inliner = NULL;
- // pm_builder.Inliner = ::llvm::createFunctionInliningPass();
- // pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
- // pm_builder.Inliner = ::llvm::createPartialInliningPass();
- pm_builder.OptLevel = 3;
- pm_builder.DisableUnitAtATime = 1;
- pm_builder.populateFunctionPassManager(fpm);
- pm_builder.populateModulePassManager(pm);
- pm.add(::llvm::createStripDeadPrototypesPass());
-
- // Add passes to emit ELF image
- {
- ::llvm::formatted_raw_ostream formatted_os(out_stream, false);
-
- // Ask the target to add backend passes as necessary.
- if (target_machine->addPassesToEmitFile(pm,
- formatted_os,
- ::llvm::TargetMachine::CGFT_ObjectFile,
- true)) {
- LOG(FATAL) << "Unable to generate ELF for this target";
- return false;
- }
-
- // Run the per-function optimization
- fpm.doInitialization();
- for (::llvm::Module::iterator F = module_->begin(), E = module_->end();
- F != E; ++F) {
- fpm.run(*F);
- }
- fpm.doFinalization();
-
- // Run the code generation passes
- pm.run(*module_);
- }
-
- return true;
-}
-
-// Check whether the align is less than or equal to the code alignment of
-// that architecture. Since the Oat writer only guarantee that the compiled
-// method being aligned to kArchAlignment, we have no way to align the ELf
-// section if the section alignment is greater than kArchAlignment.
-void LlvmCompilationUnit::CheckCodeAlign(uint32_t align) const {
- InstructionSet insn_set = GetInstructionSet();
- size_t insn_set_align = GetInstructionSetAlignment(insn_set);
- CHECK_LE(align, static_cast<uint32_t>(insn_set_align));
-}
-
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/llvm_compilation_unit.h b/compiler/llvm/llvm_compilation_unit.h
deleted file mode 100644
index f11fb6ed23..0000000000
--- a/compiler/llvm/llvm_compilation_unit.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
-#define ART_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/logging.h"
-#include "base/mutex.h"
-#include "dex/compiler_internals.h"
-#include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "globals.h"
-#include "instruction_set.h"
-#include "runtime_support_builder.h"
-#include "runtime_support_llvm_func.h"
-#include "safe_map.h"
-
-namespace art {
- class CompiledMethod;
-}
-
-namespace llvm {
- class Function;
- class LLVMContext;
- class Module;
- class raw_ostream;
-}
-
-namespace art {
-namespace llvm {
-
-class CompilerLLVM;
-class IRBuilder;
-
-class LlvmCompilationUnit {
- public:
- ~LlvmCompilationUnit();
-
- uint32_t GetCompilationUnitId() const {
- return cunit_id_;
- }
-
- InstructionSet GetInstructionSet() const;
-
- ::llvm::LLVMContext* GetLLVMContext() const {
- return context_.get();
- }
-
- ::llvm::Module* GetModule() const {
- return module_;
- }
-
- IRBuilder* GetIRBuilder() const {
- return irb_.get();
- }
-
- void SetBitcodeFileName(const std::string& bitcode_filename) {
- bitcode_filename_ = bitcode_filename;
- }
-
- LLVMInfo* GetQuickContext() const {
- return llvm_info_.get();
- }
- void SetCompilerDriver(CompilerDriver* driver) {
- driver_ = driver;
- }
- DexCompilationUnit* GetDexCompilationUnit() {
- return dex_compilation_unit_;
- }
- void SetDexCompilationUnit(DexCompilationUnit* dex_compilation_unit) {
- dex_compilation_unit_ = dex_compilation_unit;
- }
-
- bool Materialize();
-
- bool IsMaterialized() const {
- return !elf_object_.empty();
- }
-
- const std::string& GetElfObject() const {
- DCHECK(IsMaterialized());
- return elf_object_;
- }
-
- private:
- LlvmCompilationUnit(const CompilerLLVM* compiler_llvm,
- size_t cunit_id);
-
- const CompilerLLVM* compiler_llvm_;
- const size_t cunit_id_;
-
- std::unique_ptr< ::llvm::LLVMContext> context_;
- std::unique_ptr<IRBuilder> irb_;
- std::unique_ptr<RuntimeSupportBuilder> runtime_support_;
- ::llvm::Module* module_; // Managed by context_
- std::unique_ptr<IntrinsicHelper> intrinsic_helper_;
- std::unique_ptr<LLVMInfo> llvm_info_;
- CompilerDriver* driver_;
- DexCompilationUnit* dex_compilation_unit_;
-
- std::string bitcode_filename_;
-
- std::string elf_object_;
-
- SafeMap<const ::llvm::Function*, CompiledMethod*> compiled_methods_map_;
-
- void CheckCodeAlign(uint32_t offset) const;
-
- void DumpBitcodeToFile();
- void DumpBitcodeToString(std::string& str_buffer);
-
- bool MaterializeToString(std::string& str_buffer);
- bool MaterializeToRawOStream(::llvm::raw_ostream& out_stream);
-
- friend class CompilerLLVM; // For LlvmCompilationUnit constructor
-};
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_LLVM_COMPILATION_UNIT_H_
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
deleted file mode 100644
index fa93e00e19..0000000000
--- a/compiler/llvm/llvm_compiler.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "llvm_compiler.h"
-
-#include "base/macros.h"
-#ifdef ART_USE_PORTABLE_COMPILER
-#include "compiler.h"
-#include "compiler_llvm.h"
-#include "dex/portable/mir_to_gbc.h"
-#include "dex_file.h"
-#include "elf_writer_mclinker.h"
-#include "mirror/art_method-inl.h"
-#endif
-
-namespace art {
-
-#ifdef ART_USE_PORTABLE_COMPILER
-
-namespace llvm {
-
-// Thread-local storage compiler worker threads
-class LLVMCompilerTls : public CompilerTls {
- public:
- LLVMCompilerTls() : llvm_info_(nullptr) {}
- ~LLVMCompilerTls() {}
-
- void* GetLLVMInfo() { return llvm_info_; }
-
- void SetLLVMInfo(void* llvm_info) { llvm_info_ = llvm_info; }
-
- private:
- void* llvm_info_;
-};
-
-
-
-class LLVMCompiler FINAL : public Compiler {
- public:
- explicit LLVMCompiler(CompilerDriver* driver) : Compiler(driver, 1000) {}
-
- CompilerTls* CreateNewCompilerTls() {
- return new LLVMCompilerTls();
- }
-
- void Init() const OVERRIDE {
- ArtInitCompilerContext(GetCompilerDriver());
- }
-
- void UnInit() const OVERRIDE {
- ArtUnInitCompilerContext(GetCompilerDriver());
- }
-
- bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
- OVERRIDE {
- return true;
- }
-
- CompiledMethod* Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const OVERRIDE {
- CompiledMethod* method = TryCompileWithSeaIR(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) {
- return method;
- }
-
- return ArtCompileMethod(GetCompilerDriver(),
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
-
- CompiledMethod* JniCompile(uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const OVERRIDE {
- return ArtLLVMJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
- }
-
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode());
- }
-
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host, const CompilerDriver& driver) const
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return art::ElfWriterMclinker::Create(
- file, oat_writer, dex_files, android_root, is_host, driver);
- }
-
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- return PortableCodeGenerator(
- cu, cu->mir_graph.get(), &cu->arena,
- reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit));
- }
-
- void InitCompilationUnit(CompilationUnit& cu) const {
- // Fused long branches not currently useful in bitcode.
- cu.disable_opt |=
- (1 << kBranchFusing) |
- (1 << kSuppressExceptionEdges);
- }
-
- bool IsPortable() const OVERRIDE {
- return true;
- }
-
- void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
- typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
-
- SetBitcodeFileNameFn set_bitcode_file_name =
- reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
-
- set_bitcode_file_name(driver, filename);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LLVMCompiler);
-};
-
-} // namespace llvm
-#endif
-
-Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
-#ifdef ART_USE_PORTABLE_COMPILER
- return new llvm::LLVMCompiler(driver);
-#else
- UNUSED(driver);
- return nullptr;
-#endif
-}
-
-} // namespace art
diff --git a/compiler/llvm/llvm_compiler.h b/compiler/llvm/llvm_compiler.h
deleted file mode 100644
index da6d0e9784..0000000000
--- a/compiler/llvm/llvm_compiler.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_LLVM_COMPILER_H_
-#define ART_COMPILER_LLVM_LLVM_COMPILER_H_
-
-namespace art {
-
-class Compiler;
-class CompilerDriver;
-
-Compiler* CreateLLVMCompiler(CompilerDriver* driver);
-
-}
-
-#endif // ART_COMPILER_LLVM_LLVM_COMPILER_H_
diff --git a/compiler/llvm/md_builder.cc b/compiler/llvm/md_builder.cc
deleted file mode 100644
index 4331557a4a..0000000000
--- a/compiler/llvm/md_builder.cc
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include "md_builder.h"
-
-#include "llvm/IR/MDBuilder.h"
-
-#include <string>
-
-namespace art {
-namespace llvm {
-
-
-::llvm::MDNode* MDBuilder::GetTBAASpecialType(TBAASpecialType sty_id) {
- DCHECK_GE(sty_id, 0) << "Unknown TBAA special type: " << sty_id;
- DCHECK_LT(sty_id, MAX_TBAA_SPECIAL_TYPE) << "Unknown TBAA special type: " << sty_id;
- DCHECK(tbaa_root_ != NULL);
-
- ::llvm::MDNode*& spec_ty = tbaa_special_type_[sty_id];
- if (spec_ty == NULL) {
- switch (sty_id) {
- case kTBAARegister:
- spec_ty = createTBAANode("Register", tbaa_root_);
- break;
- case kTBAAStackTemp:
- spec_ty = createTBAANode("StackTemp", tbaa_root_);
- break;
- case kTBAAHeapArray:
- spec_ty = createTBAANode("HeapArray", tbaa_root_);
- break;
- case kTBAAHeapInstance:
- spec_ty = createTBAANode("HeapInstance", tbaa_root_);
- break;
- case kTBAAHeapStatic:
- spec_ty = createTBAANode("HeapStatic", tbaa_root_);
- break;
- case kTBAAJRuntime:
- spec_ty = createTBAANode("JRuntime", tbaa_root_);
- break;
- case kTBAARuntimeInfo:
- spec_ty = createTBAANode("RuntimeInfo", GetTBAASpecialType(kTBAAJRuntime));
- break;
- case kTBAAShadowFrame:
- spec_ty = createTBAANode("ShadowFrame", GetTBAASpecialType(kTBAAJRuntime));
- break;
- case kTBAAConstJObject:
- spec_ty = createTBAANode("ConstJObject", tbaa_root_, true);
- break;
- default:
- LOG(FATAL) << "Unknown TBAA special type: " << sty_id;
- break;
- }
- }
- return spec_ty;
-}
-
-::llvm::MDNode* MDBuilder::GetTBAAMemoryJType(TBAASpecialType sty_id, JType jty_id) {
- DCHECK(sty_id == kTBAAHeapArray ||
- sty_id == kTBAAHeapInstance ||
- sty_id == kTBAAHeapStatic) << "SpecialType must be array, instance, or static";
-
- DCHECK_GE(jty_id, 0) << "Unknown JType: " << jty_id;
- DCHECK_LT(jty_id, MAX_JTYPE) << "Unknown JType: " << jty_id;
- DCHECK_NE(jty_id, kVoid) << "Can't load/store Void type!";
-
- std::string name;
- size_t sty_mapped_index = 0;
- switch (sty_id) {
- case kTBAAHeapArray: sty_mapped_index = 0; name = "HeapArray "; break;
- case kTBAAHeapInstance: sty_mapped_index = 1; name = "HeapInstance "; break;
- case kTBAAHeapStatic: sty_mapped_index = 2; name = "HeapStatic "; break;
- default:
- LOG(FATAL) << "Unknown TBAA special type: " << sty_id;
- break;
- }
-
- ::llvm::MDNode*& spec_ty = tbaa_memory_jtype_[sty_mapped_index][jty_id];
- if (spec_ty != NULL) {
- return spec_ty;
- }
-
- switch (jty_id) {
- case kBoolean: name += "Boolean"; break;
- case kByte: name += "Byte"; break;
- case kChar: name += "Char"; break;
- case kShort: name += "Short"; break;
- case kInt: name += "Int"; break;
- case kLong: name += "Long"; break;
- case kFloat: name += "Float"; break;
- case kDouble: name += "Double"; break;
- case kObject: name += "Object"; break;
- default:
- LOG(FATAL) << "Unknown JType: " << jty_id;
- break;
- }
-
- spec_ty = createTBAANode(name, GetTBAASpecialType(sty_id));
- return spec_ty;
-}
-
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/md_builder.h b/compiler/llvm/md_builder.h
deleted file mode 100644
index 1246f9bd2b..0000000000
--- a/compiler/llvm/md_builder.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_MD_BUILDER_H_
-#define ART_COMPILER_LLVM_MD_BUILDER_H_
-
-#include "backend_types.h"
-
-#include "llvm/IR/MDBuilder.h"
-
-#include <cstring>
-
-namespace llvm {
- class LLVMContext;
- class MDNode;
-}
-
-namespace art {
-namespace llvm {
-
-typedef ::llvm::MDBuilder LLVMMDBuilder;
-
-class MDBuilder : public LLVMMDBuilder {
- public:
- explicit MDBuilder(::llvm::LLVMContext& context)
- : LLVMMDBuilder(context), tbaa_root_(createTBAARoot("Art TBAA Root")) {
- std::memset(tbaa_special_type_, 0, sizeof(tbaa_special_type_));
- std::memset(tbaa_memory_jtype_, 0, sizeof(tbaa_memory_jtype_));
-
- // Pre-generate the MDNode for static branch prediction
- // 64 and 4 are the llvm.expect's default values
- expect_cond_[kLikely] = createBranchWeights(64, 4);
- expect_cond_[kUnlikely] = createBranchWeights(4, 64);
- }
-
- ::llvm::MDNode* GetTBAASpecialType(TBAASpecialType special_ty);
- ::llvm::MDNode* GetTBAAMemoryJType(TBAASpecialType special_ty, JType j_ty);
-
- ::llvm::MDNode* GetBranchWeights(ExpectCond expect) {
- DCHECK_LT(expect, MAX_EXPECT) << "MAX_EXPECT is not for branch weight";
- return expect_cond_[expect];
- }
-
- private:
- ::llvm::MDNode* const tbaa_root_;
- ::llvm::MDNode* tbaa_special_type_[MAX_TBAA_SPECIAL_TYPE];
- // There are 3 categories of memory types will not alias: array element, instance field, and
- // static field.
- ::llvm::MDNode* tbaa_memory_jtype_[3][MAX_JTYPE];
-
- ::llvm::MDNode* expect_cond_[MAX_EXPECT];
-};
-
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_MD_BUILDER_H_
diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc
deleted file mode 100644
index c825fbf190..0000000000
--- a/compiler/llvm/runtime_support_builder.cc
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support_builder.h"
-
-#include "gc/accounting/card_table.h"
-#include "ir_builder.h"
-#include "monitor.h"
-#include "mirror/object.h"
-#include "runtime_support_llvm_func_list.h"
-#include "thread.h"
-
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/Module.h>
-#include <llvm/IR/Type.h>
-
-using ::llvm::BasicBlock;
-using ::llvm::CallInst;
-using ::llvm::Function;
-using ::llvm::Value;
-
-namespace art {
-namespace llvm {
-
-RuntimeSupportBuilder::RuntimeSupportBuilder(::llvm::LLVMContext& context,
- ::llvm::Module& module,
- IRBuilder& irb)
- : context_(context), module_(module), irb_(irb) {
- memset(target_runtime_support_func_, 0, sizeof(target_runtime_support_func_));
-#define GET_RUNTIME_SUPPORT_FUNC_DECL(ID, NAME) \
- do { \
- ::llvm::Function* fn = module_.getFunction(#NAME); \
- DCHECK(fn != NULL) << "Function not found: " << #NAME; \
- runtime_support_func_decls_[runtime_support::ID] = fn; \
- } while (0);
-
- RUNTIME_SUPPORT_FUNC_LIST(GET_RUNTIME_SUPPORT_FUNC_DECL)
-}
-
-
-/* Thread */
-
-::llvm::Value* RuntimeSupportBuilder::EmitGetCurrentThread() {
- Function* func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
- CallInst* call_inst = irb_.CreateCall(func);
- call_inst->setOnlyReadsMemory();
- irb_.SetTBAA(call_inst, kTBAAConstJObject);
- return call_inst;
-}
-
-::llvm::Value* RuntimeSupportBuilder::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
- TBAASpecialType s_ty) {
- Value* thread = EmitGetCurrentThread();
- return irb_.LoadFromObjectOffset(thread, offset, type, s_ty);
-}
-
-void RuntimeSupportBuilder::EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
- TBAASpecialType s_ty) {
- Value* thread = EmitGetCurrentThread();
- irb_.StoreToObjectOffset(thread, offset, value, s_ty);
-}
-
-::llvm::Value* RuntimeSupportBuilder::EmitSetCurrentThread(::llvm::Value* thread) {
- Function* func = GetRuntimeSupportFunction(runtime_support::SetCurrentThread);
- return irb_.CreateCall(func, thread);
-}
-
-
-/* ShadowFrame */
-
-::llvm::Value* RuntimeSupportBuilder::EmitPushShadowFrame(::llvm::Value* new_shadow_frame,
- ::llvm::Value* method,
- uint32_t num_vregs) {
- Value* old_shadow_frame = EmitLoadFromThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
- irb_.getArtFrameTy()->getPointerTo(),
- kTBAARuntimeInfo);
- EmitStoreToThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
- new_shadow_frame,
- kTBAARuntimeInfo);
-
- // Store the method pointer
- irb_.StoreToObjectOffset(new_shadow_frame,
- ShadowFrame::MethodOffset(),
- method,
- kTBAAShadowFrame);
-
- // Store the number of vregs
- irb_.StoreToObjectOffset(new_shadow_frame,
- ShadowFrame::NumberOfVRegsOffset(),
- irb_.getInt32(num_vregs),
- kTBAAShadowFrame);
-
- // Store the link to previous shadow frame
- irb_.StoreToObjectOffset(new_shadow_frame,
- ShadowFrame::LinkOffset(),
- old_shadow_frame,
- kTBAAShadowFrame);
-
- return old_shadow_frame;
-}
-
-::llvm::Value*
-RuntimeSupportBuilder::EmitPushShadowFrameNoInline(::llvm::Value* new_shadow_frame,
- ::llvm::Value* method,
- uint32_t num_vregs) {
- Function* func = GetRuntimeSupportFunction(runtime_support::PushShadowFrame);
- ::llvm::CallInst* call_inst =
- irb_.CreateCall4(func,
- EmitGetCurrentThread(),
- new_shadow_frame,
- method,
- irb_.getInt32(num_vregs));
- irb_.SetTBAA(call_inst, kTBAARuntimeInfo);
- return call_inst;
-}
-
-void RuntimeSupportBuilder::EmitPopShadowFrame(::llvm::Value* old_shadow_frame) {
- // Store old shadow frame to TopShadowFrame
- EmitStoreToThreadOffset(Thread::TopShadowFrameOffset().Int32Value(),
- old_shadow_frame,
- kTBAARuntimeInfo);
-}
-
-
-/* Exception */
-
-::llvm::Value* RuntimeSupportBuilder::EmitGetAndClearException() {
- Function* slow_func = GetRuntimeSupportFunction(runtime_support::GetAndClearException);
- return irb_.CreateCall(slow_func, EmitGetCurrentThread());
-}
-
-::llvm::Value* RuntimeSupportBuilder::EmitIsExceptionPending() {
- Value* exception = EmitLoadFromThreadOffset(Thread::ExceptionOffset().Int32Value(),
- irb_.getJObjectTy(),
- kTBAARuntimeInfo);
- // If exception not null
- return irb_.CreateIsNotNull(exception);
-}
-
-
-/* Suspend */
-
-void RuntimeSupportBuilder::EmitTestSuspend() {
- Function* slow_func = GetRuntimeSupportFunction(runtime_support::TestSuspend);
- CallInst* call_inst = irb_.CreateCall(slow_func, EmitGetCurrentThread());
- irb_.SetTBAA(call_inst, kTBAAJRuntime);
-}
-
-
-/* Monitor */
-
-void RuntimeSupportBuilder::EmitLockObject(::llvm::Value* object) {
- Function* slow_func = GetRuntimeSupportFunction(runtime_support::LockObject);
- irb_.CreateCall2(slow_func, object, EmitGetCurrentThread());
-}
-
-void RuntimeSupportBuilder::EmitUnlockObject(::llvm::Value* object) {
- Function* slow_func = GetRuntimeSupportFunction(runtime_support::UnlockObject);
- irb_.CreateCall2(slow_func, object, EmitGetCurrentThread());
-}
-
-
-void RuntimeSupportBuilder::EmitMarkGCCard(::llvm::Value* value, ::llvm::Value* target_addr) {
- Function* parent_func = irb_.GetInsertBlock()->getParent();
- BasicBlock* bb_mark_gc_card = BasicBlock::Create(context_, "mark_gc_card", parent_func);
- BasicBlock* bb_cont = BasicBlock::Create(context_, "mark_gc_card_cont", parent_func);
-
- ::llvm::Value* not_null = irb_.CreateIsNotNull(value);
- irb_.CreateCondBr(not_null, bb_mark_gc_card, bb_cont);
-
- irb_.SetInsertPoint(bb_mark_gc_card);
- Value* card_table = EmitLoadFromThreadOffset(Thread::CardTableOffset().Int32Value(),
- irb_.getInt8Ty()->getPointerTo(),
- kTBAAConstJObject);
- Value* target_addr_int = irb_.CreatePtrToInt(target_addr, irb_.getPtrEquivIntTy());
- Value* card_no = irb_.CreateLShr(target_addr_int,
- irb_.getPtrEquivInt(gc::accounting::CardTable::kCardShift));
- Value* card_table_entry = irb_.CreateGEP(card_table, card_no);
- irb_.CreateStore(irb_.getInt8(gc::accounting::CardTable::kCardDirty), card_table_entry,
- kTBAARuntimeInfo);
- irb_.CreateBr(bb_cont);
-
- irb_.SetInsertPoint(bb_cont);
-}
-
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/runtime_support_builder.h b/compiler/llvm/runtime_support_builder.h
deleted file mode 100644
index 898611af75..0000000000
--- a/compiler/llvm/runtime_support_builder.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
-#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
-
-#include "backend_types.h"
-#include "base/logging.h"
-#include "runtime_support_llvm_func.h"
-
-#include <stdint.h>
-
-namespace llvm {
- class LLVMContext;
- class Module;
- class Function;
- class Type;
- class Value;
-}
-
-namespace art {
-namespace llvm {
-
-class IRBuilder;
-
-
-class RuntimeSupportBuilder {
- public:
- RuntimeSupportBuilder(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb);
-
- /* Thread */
- virtual ::llvm::Value* EmitGetCurrentThread();
- virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
- TBAASpecialType s_ty);
- virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
- TBAASpecialType s_ty);
- virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
-
- /* ShadowFrame */
- virtual ::llvm::Value* EmitPushShadowFrame(::llvm::Value* new_shadow_frame,
- ::llvm::Value* method, uint32_t num_vregs);
- virtual ::llvm::Value* EmitPushShadowFrameNoInline(::llvm::Value* new_shadow_frame,
- ::llvm::Value* method, uint32_t num_vregs);
- virtual void EmitPopShadowFrame(::llvm::Value* old_shadow_frame);
-
- /* Exception */
- virtual ::llvm::Value* EmitGetAndClearException();
- virtual ::llvm::Value* EmitIsExceptionPending();
-
- /* Suspend */
- virtual void EmitTestSuspend();
-
- /* Monitor */
- void EmitLockObject(::llvm::Value* object);
- void EmitUnlockObject(::llvm::Value* object);
-
- /* MarkGCCard */
- virtual void EmitMarkGCCard(::llvm::Value* value, ::llvm::Value* target_addr);
-
- ::llvm::Function* GetRuntimeSupportFunction(runtime_support::RuntimeId id) {
- if (id >= 0 && id < runtime_support::MAX_ID) {
- return runtime_support_func_decls_[id];
- } else {
- LOG(ERROR) << "Unknown runtime function id: " << id;
- return NULL;
- }
- }
-
- virtual ~RuntimeSupportBuilder() {}
-
- protected:
- ::llvm::LLVMContext& context_;
- ::llvm::Module& module_;
- IRBuilder& irb_;
-
- private:
- ::llvm::Function* runtime_support_func_decls_[runtime_support::MAX_ID];
- bool target_runtime_support_func_[runtime_support::MAX_ID];
-};
-
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_H_
diff --git a/compiler/llvm/runtime_support_builder_arm.cc b/compiler/llvm/runtime_support_builder_arm.cc
deleted file mode 100644
index cad46247fd..0000000000
--- a/compiler/llvm/runtime_support_builder_arm.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support_builder_arm.h"
-
-#include "ir_builder.h"
-#include "thread.h"
-#include "utils_llvm.h"
-
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/InlineAsm.h>
-#include <llvm/IR/Module.h>
-#include <llvm/IR/Type.h>
-
-#include <vector>
-
-using ::llvm::CallInst;
-using ::llvm::Function;
-using ::llvm::FunctionType;
-using ::llvm::InlineAsm;
-using ::llvm::IntegerType;
-using ::llvm::Type;
-using ::llvm::Value;
-
-namespace {
-
-char LDRSTRSuffixByType(art::llvm::IRBuilder& irb, Type* type) {
- int width = type->isPointerTy() ?
- irb.getSizeOfPtrEquivInt()*8 :
- ::llvm::cast<IntegerType>(type)->getBitWidth();
- switch (width) {
- case 8: return 'b';
- case 16: return 'h';
- case 32: return ' ';
- default:
- LOG(FATAL) << "Unsupported width: " << width;
- return ' ';
- }
-}
-
-} // namespace
-
-namespace art {
-namespace llvm {
-
-/* Thread */
-
-Value* RuntimeSupportBuilderARM::EmitGetCurrentThread() {
- Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
- InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), "mov $0, r9", "=r", false);
- CallInst* thread = irb_.CreateCall(func);
- thread->setDoesNotAccessMemory();
- irb_.SetTBAA(thread, kTBAAConstJObject);
- return thread;
-}
-
-Value* RuntimeSupportBuilderARM::EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
- TBAASpecialType s_ty) {
- FunctionType* func_ty = FunctionType::get(/*Result=*/type,
- /*isVarArg=*/false);
- std::string inline_asm(StringPrintf("ldr%c $0, [r9, #%d]",
- LDRSTRSuffixByType(irb_, type),
- static_cast<int>(offset)));
- InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "=r", true);
- CallInst* result = irb_.CreateCall(func);
- result->setOnlyReadsMemory();
- irb_.SetTBAA(result, s_ty);
- return result;
-}
-
-void RuntimeSupportBuilderARM::EmitStoreToThreadOffset(int64_t offset, Value* value,
- TBAASpecialType s_ty) {
- FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
- /*Params=*/value->getType(),
- /*isVarArg=*/false);
- std::string inline_asm(StringPrintf("str%c $0, [r9, #%d]",
- LDRSTRSuffixByType(irb_, value->getType()),
- static_cast<int>(offset)));
- InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "r", true);
- CallInst* call_inst = irb_.CreateCall(func, value);
- irb_.SetTBAA(call_inst, s_ty);
-}
-
-Value* RuntimeSupportBuilderARM::EmitSetCurrentThread(Value* thread) {
- // Separate to two InlineAsm: The first one produces the return value, while the second,
- // sets the current thread.
- // LLVM can delete the first one if the caller in LLVM IR doesn't use the return value.
- //
- // Here we don't call EmitGetCurrentThread, because we mark it as DoesNotAccessMemory and
- // ConstJObject. We denote side effect to "true" below instead, so LLVM won't
- // reorder these instructions incorrectly.
- Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
- InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), "mov $0, r9", "=r", true);
- CallInst* old_thread_register = irb_.CreateCall(func);
- old_thread_register->setOnlyReadsMemory();
-
- FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
- /*Params=*/irb_.getJObjectTy(),
- /*isVarArg=*/false);
- func = InlineAsm::get(func_ty, "mov r9, $0", "r", true);
- irb_.CreateCall(func, thread);
- return old_thread_register;
-}
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/runtime_support_builder_arm.h b/compiler/llvm/runtime_support_builder_arm.h
deleted file mode 100644
index 0d01509be0..0000000000
--- a/compiler/llvm/runtime_support_builder_arm.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
-#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
-
-#include "runtime_support_builder.h"
-
-namespace art {
-namespace llvm {
-
-class RuntimeSupportBuilderARM : public RuntimeSupportBuilder {
- public:
- RuntimeSupportBuilderARM(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb)
- : RuntimeSupportBuilder(context, module, irb) {}
-
- /* Thread */
- virtual ::llvm::Value* EmitGetCurrentThread();
- virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
- TBAASpecialType s_ty);
- virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
- TBAASpecialType s_ty);
- virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
-};
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_ARM_H_
diff --git a/compiler/llvm/runtime_support_builder_x86.cc b/compiler/llvm/runtime_support_builder_x86.cc
deleted file mode 100644
index 3d11f9df5b..0000000000
--- a/compiler/llvm/runtime_support_builder_x86.cc
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support_builder_x86.h"
-
-#include "base/stringprintf.h"
-#include "ir_builder.h"
-#include "thread.h"
-#include "utils_llvm.h"
-
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/InlineAsm.h>
-#include <llvm/IR/Module.h>
-#include <llvm/IR/Type.h>
-
-#include <vector>
-
-using ::llvm::CallInst;
-using ::llvm::Function;
-using ::llvm::FunctionType;
-using ::llvm::InlineAsm;
-using ::llvm::Type;
-using ::llvm::UndefValue;
-using ::llvm::Value;
-
-namespace art {
-namespace llvm {
-
-
-Value* RuntimeSupportBuilderX86::EmitGetCurrentThread() {
- Function* ori_func = GetRuntimeSupportFunction(runtime_support::GetCurrentThread);
- std::string inline_asm(StringPrintf("mov %%fs:%d, $0", Thread::SelfOffset().Int32Value()));
- InlineAsm* func = InlineAsm::get(ori_func->getFunctionType(), inline_asm, "=r", false);
- CallInst* thread = irb_.CreateCall(func);
- thread->setDoesNotAccessMemory();
- irb_.SetTBAA(thread, kTBAAConstJObject);
- return thread;
-}
-
-Value* RuntimeSupportBuilderX86::EmitLoadFromThreadOffset(int64_t offset, Type* type,
- TBAASpecialType s_ty) {
- FunctionType* func_ty = FunctionType::get(/*Result=*/type,
- /*isVarArg=*/false);
- std::string inline_asm(StringPrintf("mov %%fs:%d, $0", static_cast<int>(offset)));
- InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "=r", true);
- CallInst* result = irb_.CreateCall(func);
- result->setOnlyReadsMemory();
- irb_.SetTBAA(result, s_ty);
- return result;
-}
-
-void RuntimeSupportBuilderX86::EmitStoreToThreadOffset(int64_t offset, Value* value,
- TBAASpecialType s_ty) {
- FunctionType* func_ty = FunctionType::get(/*Result=*/Type::getVoidTy(context_),
- /*Params=*/value->getType(),
- /*isVarArg=*/false);
- std::string inline_asm(StringPrintf("mov $0, %%fs:%d", static_cast<int>(offset)));
- InlineAsm* func = InlineAsm::get(func_ty, inline_asm, "r", true);
- CallInst* call_inst = irb_.CreateCall(func, value);
- irb_.SetTBAA(call_inst, s_ty);
-}
-
-Value* RuntimeSupportBuilderX86::EmitSetCurrentThread(Value*) {
- /* Nothing to be done. */
- return UndefValue::get(irb_.getJObjectTy());
-}
-
-
-} // namespace llvm
-} // namespace art
diff --git a/compiler/llvm/runtime_support_builder_x86.h b/compiler/llvm/runtime_support_builder_x86.h
deleted file mode 100644
index 5f36e7ce18..0000000000
--- a/compiler/llvm/runtime_support_builder_x86.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
-#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
-
-#include "runtime_support_builder.h"
-
-namespace art {
-namespace llvm {
-
-class RuntimeSupportBuilderX86 : public RuntimeSupportBuilder {
- public:
- RuntimeSupportBuilderX86(::llvm::LLVMContext& context, ::llvm::Module& module, IRBuilder& irb)
- : RuntimeSupportBuilder(context, module, irb) {}
-
- /* Thread */
- virtual ::llvm::Value* EmitGetCurrentThread();
- virtual ::llvm::Value* EmitLoadFromThreadOffset(int64_t offset, ::llvm::Type* type,
- TBAASpecialType s_ty);
- virtual void EmitStoreToThreadOffset(int64_t offset, ::llvm::Value* value,
- TBAASpecialType s_ty);
- virtual ::llvm::Value* EmitSetCurrentThread(::llvm::Value* thread);
-};
-
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_BUILDER_X86_H_
diff --git a/compiler/llvm/runtime_support_llvm_func.h b/compiler/llvm/runtime_support_llvm_func.h
deleted file mode 100644
index a5ad852b49..0000000000
--- a/compiler/llvm/runtime_support_llvm_func.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_
-#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_
-
-#include "runtime_support_llvm_func_list.h"
-
-namespace art {
-namespace llvm {
-namespace runtime_support {
-
- enum RuntimeId {
-#define DEFINE_RUNTIME_SUPPORT_FUNC_ID(ID, NAME) ID,
- RUNTIME_SUPPORT_FUNC_LIST(DEFINE_RUNTIME_SUPPORT_FUNC_ID)
-
- MAX_ID
- };
-
-} // namespace runtime_support
-} // namespace llvm
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_
diff --git a/compiler/llvm/runtime_support_llvm_func_list.h b/compiler/llvm/runtime_support_llvm_func_list.h
deleted file mode 100644
index b5ac1ffe63..0000000000
--- a/compiler/llvm/runtime_support_llvm_func_list.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
-#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
-
-#define RUNTIME_SUPPORT_FUNC_LIST(V) \
- V(LockObject, art_portable_lock_object_from_code) \
- V(UnlockObject, art_portable_unlock_object_from_code) \
- V(GetCurrentThread, art_portable_get_current_thread_from_code) \
- V(SetCurrentThread, art_portable_set_current_thread_from_code) \
- V(PushShadowFrame, art_portable_push_shadow_frame_from_code) \
- V(PopShadowFrame, art_portable_pop_shadow_frame_from_code) \
- V(TestSuspend, art_portable_test_suspend_from_code) \
- V(ThrowException, art_portable_throw_exception_from_code) \
- V(ThrowStackOverflowException, art_portable_throw_stack_overflow_from_code) \
- V(ThrowNullPointerException, art_portable_throw_null_pointer_exception_from_code) \
- V(ThrowDivZeroException, art_portable_throw_div_zero_from_code) \
- V(ThrowIndexOutOfBounds, art_portable_throw_array_bounds_from_code) \
- V(InitializeTypeAndVerifyAccess, art_portable_initialize_type_and_verify_access_from_code) \
- V(InitializeType, art_portable_initialize_type_from_code) \
- V(IsAssignable, art_portable_is_assignable_from_code) \
- V(CheckCast, art_portable_check_cast_from_code) \
- V(CheckPutArrayElement, art_portable_check_put_array_element_from_code) \
- V(AllocObject, art_portable_alloc_object_from_code) \
- V(AllocObjectWithAccessCheck, art_portable_alloc_object_from_code_with_access_check) \
- V(AllocArray, art_portable_alloc_array_from_code) \
- V(AllocArrayWithAccessCheck, art_portable_alloc_array_from_code_with_access_check) \
- V(CheckAndAllocArray, art_portable_check_and_alloc_array_from_code) \
- V(CheckAndAllocArrayWithAccessCheck, art_portable_check_and_alloc_array_from_code_with_access_check) \
- V(FindStaticMethodWithAccessCheck, art_portable_find_static_method_from_code_with_access_check) \
- V(FindDirectMethodWithAccessCheck, art_portable_find_direct_method_from_code_with_access_check) \
- V(FindVirtualMethodWithAccessCheck, art_portable_find_virtual_method_from_code_with_access_check) \
- V(FindSuperMethodWithAccessCheck, art_portable_find_super_method_from_code_with_access_check) \
- V(FindInterfaceMethodWithAccessCheck, art_portable_find_interface_method_from_code_with_access_check) \
- V(FindInterfaceMethod, art_portable_find_interface_method_from_code) \
- V(ResolveString, art_portable_resolve_string_from_code) \
- V(Set32Static, art_portable_set32_static_from_code) \
- V(Set64Static, art_portable_set64_static_from_code) \
- V(SetObjectStatic, art_portable_set_obj_static_from_code) \
- V(Get32Static, art_portable_get32_static_from_code) \
- V(Get64Static, art_portable_get64_static_from_code) \
- V(GetObjectStatic, art_portable_get_obj_static_from_code) \
- V(Set32Instance, art_portable_set32_instance_from_code) \
- V(Set64Instance, art_portable_set64_instance_from_code) \
- V(SetObjectInstance, art_portable_set_obj_instance_from_code) \
- V(Get32Instance, art_portable_get32_instance_from_code) \
- V(Get64Instance, art_portable_get64_instance_from_code) \
- V(GetObjectInstance, art_portable_get_obj_instance_from_code) \
- V(InitializeStaticStorage, art_portable_initialize_static_storage_from_code) \
- V(FillArrayData, art_portable_fill_array_data_from_code) \
- V(GetAndClearException, art_portable_get_and_clear_exception) \
- V(IsExceptionPending, art_portable_is_exception_pending_from_code) \
- V(FindCatchBlock, art_portable_find_catch_block_from_code) \
- V(MarkGCCard, art_portable_mark_gc_card_from_code) \
- V(ProxyInvokeHandler, art_portable_proxy_invoke_handler_from_code) \
- V(art_d2l, art_d2l) \
- V(art_d2i, art_d2i) \
- V(art_f2l, art_f2l) \
- V(art_f2i, art_f2i) \
- V(JniMethodStart, art_portable_jni_method_start) \
- V(JniMethodStartSynchronized, art_portable_jni_method_start_synchronized) \
- V(JniMethodEnd, art_portable_jni_method_end) \
- V(JniMethodEndSynchronized, art_portable_jni_method_end_synchronized) \
- V(JniMethodEndWithReference, art_portable_jni_method_end_with_reference) \
- V(JniMethodEndWithReferenceSynchronized, art_portable_jni_method_end_with_reference_synchronized)
-
-#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
diff --git a/compiler/llvm/tools/gen_art_module_cc.sh b/compiler/llvm/tools/gen_art_module_cc.sh
deleted file mode 100755
index c5df333e34..0000000000
--- a/compiler/llvm/tools/gen_art_module_cc.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash -e
-
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-SCRIPTDIR=`dirname "$0"`
-cd "${SCRIPTDIR}/.."
-
-mkdir -p generated
-
-OUTPUT_FILE=generated/art_module.cc
-
-echo "// Generated with ${0}" > ${OUTPUT_FILE}
-
-echo '
-
-#pragma GCC diagnostic ignored "-Wframe-larger-than="
-// TODO: Remove this pragma after llc can generate makeLLVMModuleContents()
-// with smaller frame size.
-
-#include <llvm/IR/DerivedTypes.h>
-#include <llvm/IR/Function.h>
-#include <llvm/IR/Module.h>
-#include <llvm/IR/Type.h>
-
-#include <vector>
-
-using namespace llvm;
-
-namespace art {
-namespace llvm {
-
-' >> ${OUTPUT_FILE}
-
-llc -march=cpp -cppgen=contents art_module.ll -o - >> ${OUTPUT_FILE}
-
-echo '
-} // namespace llvm
-} // namespace art' >> ${OUTPUT_FILE}
diff --git a/compiler/llvm/utils_llvm.h b/compiler/llvm/utils_llvm.h
deleted file mode 100644
index a606b91958..0000000000
--- a/compiler/llvm/utils_llvm.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LLVM_UTILS_LLVM_H_
-#define ART_COMPILER_LLVM_UTILS_LLVM_H_
-
-#include <llvm/Analysis/Verifier.h>
-
-namespace art {
-
-#ifndef NDEBUG
-#define VERIFY_LLVM_FUNCTION(func) ::llvm::verifyFunction(func, ::llvm::AbortProcessAction)
-#else
-#define VERIFY_LLVM_FUNCTION(func)
-#endif
-
-} // namespace art
-
-#endif // ART_COMPILER_LLVM_UTILS_LLVM_H_
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 9fe98e3663..3ca0cdf011 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -48,40 +48,23 @@ class OatTest : public CommonCompilerTest {
if (compiled_method == nullptr) {
EXPECT_TRUE(oat_method.GetQuickCode() == nullptr) << PrettyMethod(method) << " "
<< oat_method.GetQuickCode();
- EXPECT_TRUE(oat_method.GetPortableCode() == nullptr) << PrettyMethod(method) << " "
- << oat_method.GetPortableCode();
EXPECT_EQ(oat_method.GetFrameSizeInBytes(), 0U);
EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U);
EXPECT_EQ(oat_method.GetFpSpillMask(), 0U);
} else {
const void* quick_oat_code = oat_method.GetQuickCode();
- if (quick_oat_code != nullptr) {
- EXPECT_EQ(oat_method.GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
- EXPECT_EQ(oat_method.GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
- EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask());
- uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2);
- quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
- const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- EXPECT_TRUE(quick_code != nullptr);
- size_t code_size = quick_code->size() * sizeof(quick_code[0]);
- EXPECT_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size))
- << PrettyMethod(method) << " " << code_size;
- CHECK_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size));
- } else {
- const void* portable_oat_code = oat_method.GetPortableCode();
- EXPECT_TRUE(portable_oat_code != nullptr) << PrettyMethod(method);
- EXPECT_EQ(oat_method.GetFrameSizeInBytes(), 0U);
- EXPECT_EQ(oat_method.GetCoreSpillMask(), 0U);
- EXPECT_EQ(oat_method.GetFpSpillMask(), 0U);
- uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(portable_oat_code), 2);
- portable_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
- const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
- EXPECT_TRUE(portable_code != nullptr);
- size_t code_size = portable_code->size() * sizeof(portable_code[0]);
- EXPECT_EQ(0, memcmp(quick_oat_code, &portable_code[0], code_size))
- << PrettyMethod(method) << " " << code_size;
- CHECK_EQ(0, memcmp(quick_oat_code, &portable_code[0], code_size));
- }
+ EXPECT_TRUE(quick_oat_code != nullptr) << PrettyMethod(method);
+ EXPECT_EQ(oat_method.GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
+ EXPECT_EQ(oat_method.GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
+ EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask());
+ uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2);
+ quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
+ const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
+ EXPECT_TRUE(quick_code != nullptr);
+ size_t code_size = quick_code->size() * sizeof(quick_code[0]);
+ EXPECT_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size))
+ << PrettyMethod(method) << " " << code_size;
+ CHECK_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size));
}
}
};
@@ -91,9 +74,7 @@ TEST_F(OatTest, WriteRead) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: make selectable.
- Compiler::Kind compiler_kind = kUsePortableCompiler
- ? Compiler::kPortable
- : Compiler::kQuick;
+ Compiler::Kind compiler_kind = Compiler::kQuick;
InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
std::string error_msg;
@@ -189,7 +170,7 @@ TEST_F(OatTest, WriteRead) {
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(84U, sizeof(OatHeader));
+ EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(28U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(91 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8a7abb4001..7d14de1306 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -417,9 +417,6 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
size_interpreter_to_interpreter_bridge_(0),
size_interpreter_to_compiled_code_bridge_(0),
size_jni_dlsym_lookup_(0),
- size_portable_imt_conflict_trampoline_(0),
- size_portable_resolution_trampoline_(0),
- size_portable_to_interpreter_bridge_(0),
size_quick_generic_jni_trampoline_(0),
size_quick_imt_conflict_trampoline_(0),
size_quick_resolution_trampoline_(0),
@@ -530,7 +527,7 @@ struct OatWriter::GcMapDataAccess {
struct OatWriter::MappingTableDataAccess {
static const std::vector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return &compiled_method->GetMappingTable();
+ return compiled_method->GetMappingTable();
}
static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
@@ -722,110 +719,101 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
- const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- if (portable_code != nullptr) {
- CHECK(quick_code == nullptr);
- size_t oat_method_offsets_offset =
- oat_class->GetOatMethodOffsetsOffsetFromOatHeader(class_def_method_index);
- compiled_method->AddOatdataOffsetToCompliledCodeOffset(
- oat_method_offsets_offset + OFFSETOF_MEMBER(OatMethodOffsets, code_offset_));
+ CHECK(quick_code != nullptr);
+ offset_ = writer_->relative_call_patcher_->ReserveSpace(offset_, compiled_method);
+ offset_ = compiled_method->AlignCode(offset_);
+ DCHECK_ALIGNED_PARAM(offset_,
+ GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
+ uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+ CHECK_NE(code_size, 0U);
+ uint32_t thumb_offset = compiled_method->CodeDelta();
+ quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
+
+ bool deduped = false;
+
+ // Deduplicate code arrays.
+ auto lb = dedupe_map_.lower_bound(compiled_method);
+ if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
+ quick_code_offset = lb->second;
+ deduped = true;
} else {
- CHECK(quick_code != nullptr);
- offset_ = writer_->relative_call_patcher_->ReserveSpace(offset_, compiled_method);
- offset_ = compiled_method->AlignCode(offset_);
- DCHECK_ALIGNED_PARAM(offset_,
- GetInstructionSetAlignment(compiled_method->GetInstructionSet()));
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
- CHECK_NE(code_size, 0U);
- uint32_t thumb_offset = compiled_method->CodeDelta();
- quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
-
- bool deduped = false;
-
- // Deduplicate code arrays.
- auto lb = dedupe_map_.lower_bound(compiled_method);
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(compiled_method, lb->first)) {
- quick_code_offset = lb->second;
- deduped = true;
- } else {
- dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
- }
+ dedupe_map_.PutBefore(lb, compiled_method, quick_code_offset);
+ }
- MethodReference method_ref(dex_file_, it.GetMemberIndex());
- auto method_lb = writer_->method_offset_map_.lower_bound(method_ref);
- if (method_lb != writer_->method_offset_map_.end() &&
- !writer_->method_offset_map_.key_comp()(method_ref, method_lb->first)) {
- // TODO: Should this be a hard failure?
- LOG(WARNING) << "Multiple definitions of "
- << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file)
- << ((method_lb->second != quick_code_offset) ? "; OFFSET MISMATCH" : "");
- } else {
- writer_->method_offset_map_.PutBefore(method_lb, method_ref, quick_code_offset);
- }
+ MethodReference method_ref(dex_file_, it.GetMemberIndex());
+ auto method_lb = writer_->method_offset_map_.lower_bound(method_ref);
+ if (method_lb != writer_->method_offset_map_.end() &&
+ !writer_->method_offset_map_.key_comp()(method_ref, method_lb->first)) {
+ // TODO: Should this be a hard failure?
+ LOG(WARNING) << "Multiple definitions of "
+ << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file)
+ << ((method_lb->second != quick_code_offset) ? "; OFFSET MISMATCH" : "");
+ } else {
+ writer_->method_offset_map_.PutBefore(method_lb, method_ref, quick_code_offset);
+ }
- // Update quick method header.
- DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
- OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
- uint32_t mapping_table_offset = method_header->mapping_table_offset_;
- uint32_t vmap_table_offset = method_header->vmap_table_offset_;
- uint32_t gc_map_offset = method_header->gc_map_offset_;
- // The code offset was 0 when the mapping/vmap table offset was set, so it's set
- // to 0-offset and we need to adjust it by code_offset.
- uint32_t code_offset = quick_code_offset - thumb_offset;
- if (mapping_table_offset != 0u) {
- mapping_table_offset += code_offset;
- DCHECK_LT(mapping_table_offset, code_offset);
- }
- if (vmap_table_offset != 0u) {
- vmap_table_offset += code_offset;
- DCHECK_LT(vmap_table_offset, code_offset);
- }
- if (gc_map_offset != 0u) {
- gc_map_offset += code_offset;
- DCHECK_LT(gc_map_offset, code_offset);
- }
- uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
- uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
- *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset,
- gc_map_offset, frame_size_in_bytes, core_spill_mask,
- fp_spill_mask, code_size);
-
- if (!deduped) {
- // Update offsets. (Checksum is updated when writing.)
- offset_ += sizeof(*method_header); // Method header is prepended before code.
- offset_ += code_size;
- // Record absolute patch locations.
- if (!compiled_method->GetPatches().empty()) {
- uintptr_t base_loc = offset_ - code_size - writer_->oat_header_->GetExecutableOffset();
- for (const LinkerPatch& patch : compiled_method->GetPatches()) {
- if (patch.Type() != kLinkerPatchCallRelative) {
- writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
- }
+ // Update quick method header.
+ DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size());
+ OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_];
+ uint32_t mapping_table_offset = method_header->mapping_table_offset_;
+ uint32_t vmap_table_offset = method_header->vmap_table_offset_;
+ uint32_t gc_map_offset = method_header->gc_map_offset_;
+ // The code offset was 0 when the mapping/vmap table offset was set, so it's set
+ // to 0-offset and we need to adjust it by code_offset.
+ uint32_t code_offset = quick_code_offset - thumb_offset;
+ if (mapping_table_offset != 0u) {
+ mapping_table_offset += code_offset;
+ DCHECK_LT(mapping_table_offset, code_offset);
+ }
+ if (vmap_table_offset != 0u) {
+ vmap_table_offset += code_offset;
+ DCHECK_LT(vmap_table_offset, code_offset);
+ }
+ if (gc_map_offset != 0u) {
+ gc_map_offset += code_offset;
+ DCHECK_LT(gc_map_offset, code_offset);
+ }
+ uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
+ uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
+ uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
+ *method_header = OatQuickMethodHeader(mapping_table_offset, vmap_table_offset,
+ gc_map_offset, frame_size_in_bytes, core_spill_mask,
+ fp_spill_mask, code_size);
+
+ if (!deduped) {
+ // Update offsets. (Checksum is updated when writing.)
+ offset_ += sizeof(*method_header); // Method header is prepended before code.
+ offset_ += code_size;
+ // Record absolute patch locations.
+ if (!compiled_method->GetPatches().empty()) {
+ uintptr_t base_loc = offset_ - code_size - writer_->oat_header_->GetExecutableOffset();
+ for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+ if (patch.Type() != kLinkerPatchCallRelative) {
+ writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
}
}
}
+ }
- if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
- // Record debug information for this function if we are doing that.
-
- std::string name = PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
- if (deduped) {
- // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
- // so that it will show up in a debuggerd crash report.
- name += " [ DEDUPED ]";
- }
+ if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ // Record debug information for this function if we are doing that.
- const uint32_t quick_code_start = quick_code_offset -
- writer_->oat_header_->GetExecutableOffset();
- const DexFile::CodeItem *code_item = it.GetMethodCodeItem();
- writer_->method_info_.push_back(DebugInfo(name,
- dex_file_->GetSourceFile(dex_file_->GetClassDef(class_def_index_)),
- quick_code_start, quick_code_start + code_size,
- code_item == nullptr ? nullptr : dex_file_->GetDebugInfoStream(code_item),
- compiled_method));
+ std::string name = PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
+ if (deduped) {
+ // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
+ // so that it will show up in a debuggerd crash report.
+ name += " [ DEDUPED ]";
}
+
+ const uint32_t quick_code_start = quick_code_offset -
+ writer_->oat_header_->GetExecutableOffset();
+ const DexFile::CodeItem *code_item = it.GetMethodCodeItem();
+ writer_->method_info_.push_back(DebugInfo(name,
+ dex_file_->GetSourceFile(dex_file_->GetClassDef(class_def_index_)),
+ quick_code_start, quick_code_start + code_size,
+ code_item == nullptr ? nullptr : dex_file_->GetDebugInfoStream(code_item),
+ compiled_method));
}
if (kIsDebugBuild) {
@@ -944,7 +932,6 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
std::string dump = exc->Dump();
LOG(FATAL) << dump;
}
- // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
method->SetQuickOatCodeOffset(offsets.code_offset_);
return true;
@@ -1005,7 +992,6 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
if (quick_code != nullptr) {
- CHECK(compiled_method->GetPortableCode() == nullptr);
offset_ = writer_->relative_call_patcher_->WriteThunks(out, offset_);
if (offset_ == 0u) {
ReportWriteFailure("relative call thunk", it);
@@ -1357,9 +1343,6 @@ size_t OatWriter::InitOatCode(size_t offset) {
DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge);
DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge);
DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
- DO_TRAMPOLINE(portable_imt_conflict_trampoline_, PortableImtConflictTrampoline);
- DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
- DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
@@ -1370,9 +1353,6 @@ size_t OatWriter::InitOatCode(size_t offset) {
oat_header_->SetInterpreterToInterpreterBridgeOffset(0);
oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0);
oat_header_->SetJniDlsymLookupOffset(0);
- oat_header_->SetPortableImtConflictTrampolineOffset(0);
- oat_header_->SetPortableResolutionTrampolineOffset(0);
- oat_header_->SetPortableToInterpreterBridgeOffset(0);
oat_header_->SetQuickGenericJniTrampolineOffset(0);
oat_header_->SetQuickImtConflictTrampolineOffset(0);
oat_header_->SetQuickResolutionTrampolineOffset(0);
@@ -1467,9 +1447,6 @@ bool OatWriter::Write(OutputStream* out) {
DO_STAT(size_interpreter_to_interpreter_bridge_);
DO_STAT(size_interpreter_to_compiled_code_bridge_);
DO_STAT(size_jni_dlsym_lookup_);
- DO_STAT(size_portable_imt_conflict_trampoline_);
- DO_STAT(size_portable_resolution_trampoline_);
- DO_STAT(size_portable_to_interpreter_bridge_);
DO_STAT(size_quick_generic_jni_trampoline_);
DO_STAT(size_quick_imt_conflict_trampoline_);
DO_STAT(size_quick_resolution_trampoline_);
@@ -1612,9 +1589,6 @@ size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t
DO_TRAMPOLINE(interpreter_to_interpreter_bridge_);
DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_);
DO_TRAMPOLINE(jni_dlsym_lookup_);
- DO_TRAMPOLINE(portable_imt_conflict_trampoline_);
- DO_TRAMPOLINE(portable_resolution_trampoline_);
- DO_TRAMPOLINE(portable_to_interpreter_bridge_);
DO_TRAMPOLINE(quick_generic_jni_trampoline_);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
DO_TRAMPOLINE(quick_resolution_trampoline_);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index b3ac7ff895..e020d31d7e 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -214,10 +214,7 @@ class OatWriter {
}
// Offset of start of OatClass from beginning of OatHeader. It is
- // used to validate file position when writing. For Portable, it
- // is also used to calculate the position of the OatMethodOffsets
- // so that code pointers within the OatMethodOffsets can be
- // patched to point to code in the Portable .o ELF objects.
+ // used to validate file position when writing.
size_t offset_;
// CompiledMethods for each class_def_method_index, or NULL if no method is available.
@@ -285,9 +282,6 @@ class OatWriter {
std::unique_ptr<const std::vector<uint8_t>> interpreter_to_interpreter_bridge_;
std::unique_ptr<const std::vector<uint8_t>> interpreter_to_compiled_code_bridge_;
std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_;
- std::unique_ptr<const std::vector<uint8_t>> portable_imt_conflict_trampoline_;
- std::unique_ptr<const std::vector<uint8_t>> portable_resolution_trampoline_;
- std::unique_ptr<const std::vector<uint8_t>> portable_to_interpreter_bridge_;
std::unique_ptr<const std::vector<uint8_t>> quick_generic_jni_trampoline_;
std::unique_ptr<const std::vector<uint8_t>> quick_imt_conflict_trampoline_;
std::unique_ptr<const std::vector<uint8_t>> quick_resolution_trampoline_;
@@ -302,9 +296,6 @@ class OatWriter {
uint32_t size_interpreter_to_interpreter_bridge_;
uint32_t size_interpreter_to_compiled_code_bridge_;
uint32_t size_jni_dlsym_lookup_;
- uint32_t size_portable_imt_conflict_trampoline_;
- uint32_t size_portable_resolution_trampoline_;
- uint32_t size_portable_to_interpreter_bridge_;
uint32_t size_quick_generic_jni_trampoline_;
uint32_t size_quick_imt_conflict_trampoline_;
uint32_t size_quick_resolution_trampoline_;
@@ -347,8 +338,8 @@ class OatWriter {
return lhs->GetQuickCode() < rhs->GetQuickCode();
}
// If the code is the same, all other fields are likely to be the same as well.
- if (UNLIKELY(&lhs->GetMappingTable() != &rhs->GetMappingTable())) {
- return &lhs->GetMappingTable() < &rhs->GetMappingTable();
+ if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
+ return lhs->GetMappingTable() < rhs->GetMappingTable();
}
if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
return &lhs->GetVmapTable() < &rhs->GetVmapTable();
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index c353d66f1e..d6c3515726 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -28,14 +28,54 @@ class MonotonicValueRange;
*/
class ValueBound : public ValueObject {
public:
- static ValueBound Create(HInstruction* instruction, int constant) {
- if (instruction == nullptr) {
- return ValueBound(nullptr, constant);
+ ValueBound(HInstruction* instruction, int constant) {
+ if (instruction != nullptr && instruction->IsIntConstant()) {
+ // Normalizing ValueBound with constant instruction.
+ int instr_const = instruction->AsIntConstant()->GetValue();
+ if (constant >= 0 && (instr_const <= INT_MAX - constant)) {
+ // No overflow.
+ instruction_ = nullptr;
+ constant_ = instr_const + constant;
+ return;
+ }
+ if (constant < 0 && (instr_const >= INT_MIN - constant)) {
+ // No underflow.
+ instruction_ = nullptr;
+ constant_ = instr_const + constant;
+ return;
+ }
}
+ instruction_ = instruction;
+ constant_ = constant;
+ }
+
+ // Try to detect useful value bound format from an instruction, e.g.
+ // a constant or array length related value.
+ static ValueBound DetectValueBoundFromValue(HInstruction* instruction, bool* found) {
+ DCHECK(instruction != nullptr);
if (instruction->IsIntConstant()) {
- return ValueBound(nullptr, instruction->AsIntConstant()->GetValue() + constant);
+ *found = true;
+ return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
+ }
+
+ if (instruction->IsArrayLength()) {
+ *found = true;
+ return ValueBound(instruction, 0);
+ }
+ // Try to detect (array.length + c) format.
+ if (instruction->IsAdd()) {
+ HAdd* add = instruction->AsAdd();
+ HInstruction* left = add->GetLeft();
+ HInstruction* right = add->GetRight();
+ if (left->IsArrayLength() && right->IsIntConstant()) {
+ *found = true;
+ return ValueBound(left, right->AsIntConstant()->GetValue());
+ }
}
- return ValueBound(instruction, constant);
+
+ // No useful bound detected.
+ *found = false;
+ return ValueBound::Max();
}
HInstruction* GetInstruction() const { return instruction_; }
@@ -140,7 +180,7 @@ class ValueBound : public ValueObject {
// overflows/underflows, then we can't accurately represent it. For correctness,
// just return Max/Min() depending on whether the returned ValueBound is used for
// lower/upper bound.
- ValueBound Add(int c, bool for_lower_bound, bool* overflow_or_underflow) const {
+ ValueBound Add(int c, bool* overflow_or_underflow) const {
*overflow_or_underflow = false;
if (c == 0) {
return *this;
@@ -151,7 +191,7 @@ class ValueBound : public ValueObject {
if (constant_ > INT_MAX - c) {
// Constant part overflows.
*overflow_or_underflow = true;
- return for_lower_bound ? Min() : Max();
+ return Max();
} else {
new_constant = constant_ + c;
}
@@ -159,7 +199,7 @@ class ValueBound : public ValueObject {
if (constant_ < INT_MIN - c) {
// Constant part underflows.
*overflow_or_underflow = true;
- return for_lower_bound ? Min() : Max();
+ return Max();
} else {
new_constant = constant_ + c;
}
@@ -168,9 +208,6 @@ class ValueBound : public ValueObject {
}
private:
- ValueBound(HInstruction* instruction, int constant)
- : instruction_(instruction), constant_(constant) {}
-
HInstruction* instruction_;
int constant_;
};
@@ -231,12 +268,12 @@ class ValueRange : public ArenaObject<kArenaAllocMisc> {
// return the full integer range.
ValueRange* Add(int constant) const {
bool overflow_or_underflow;
- ValueBound lower = lower_.Add(constant, true, &overflow_or_underflow);
+ ValueBound lower = lower_.Add(constant, &overflow_or_underflow);
if (overflow_or_underflow) {
// We can't accurately represent the bounds anymore.
return FullIntRange();
}
- ValueBound upper = upper_.Add(constant, false, &overflow_or_underflow);
+ ValueBound upper = upper_.Add(constant, &overflow_or_underflow);
if (overflow_or_underflow) {
// We can't accurately represent the bounds anymore.
return FullIntRange();
@@ -265,14 +302,16 @@ class ValueRange : public ArenaObject<kArenaAllocMisc> {
*/
class MonotonicValueRange : public ValueRange {
public:
- static MonotonicValueRange* Create(ArenaAllocator* allocator,
- HInstruction* initial, int increment) {
- DCHECK_NE(increment, 0);
- // To be conservative, give it full range [INT_MIN, INT_MAX] in case it's
- // used as a regular value range, due to possible overflow/underflow.
- return new (allocator) MonotonicValueRange(
- allocator, ValueBound::Min(), ValueBound::Max(), initial, increment);
- }
+ MonotonicValueRange(ArenaAllocator* allocator,
+ HInstruction* initial,
+ int increment,
+ ValueBound bound)
+ // To be conservative, give it full range [INT_MIN, INT_MAX] in case it's
+ // used as a regular value range, due to possible overflow/underflow.
+ : ValueRange(allocator, ValueBound::Min(), ValueBound::Max()),
+ initial_(initial),
+ increment_(increment),
+ bound_(bound) {}
virtual ~MonotonicValueRange() {}
@@ -298,8 +337,7 @@ class MonotonicValueRange : public ValueRange {
if (increment_ > 0) {
// Monotonically increasing.
- ValueBound lower = ValueBound::NarrowLowerBound(
- ValueBound::Create(initial_, 0), range->GetLower());
+ ValueBound lower = ValueBound::NarrowLowerBound(bound_, range->GetLower());
// We currently conservatively assume max array length is INT_MAX. If we can
// make assumptions about the max array length, e.g. due to the max heap size,
@@ -351,8 +389,7 @@ class MonotonicValueRange : public ValueRange {
} else {
DCHECK_NE(increment_, 0);
// Monotonically decreasing.
- ValueBound upper = ValueBound::NarrowUpperBound(
- ValueBound::Create(initial_, 0), range->GetUpper());
+ ValueBound upper = ValueBound::NarrowUpperBound(bound_, range->GetUpper());
// Need to take care of underflow. Try to prove underflow won't happen
// for common cases. Basically need to be able to prove for any value
@@ -370,14 +407,9 @@ class MonotonicValueRange : public ValueRange {
}
private:
- MonotonicValueRange(ArenaAllocator* allocator, ValueBound lower,
- ValueBound upper, HInstruction* initial, int increment)
- : ValueRange(allocator, lower, upper),
- initial_(initial),
- increment_(increment) {}
-
HInstruction* const initial_;
const int increment_;
+  ValueBound bound_;  // Additional value bound info for initial_.
DISALLOW_COPY_AND_ASSIGN(MonotonicValueRange);
};
@@ -414,30 +446,6 @@ class BCEVisitor : public HGraphVisitor {
return nullptr;
}
- // Try to detect useful value bound format from an instruction, e.g.
- // a constant or array length related value.
- ValueBound DetectValueBoundFromValue(HInstruction* instruction) {
- if (instruction->IsIntConstant()) {
- return ValueBound::Create(nullptr, instruction->AsIntConstant()->GetValue());
- }
-
- if (instruction->IsArrayLength()) {
- return ValueBound::Create(instruction, 0);
- }
- // Try to detect (array.length + c) format.
- if (instruction->IsAdd()) {
- HAdd* add = instruction->AsAdd();
- HInstruction* left = add->GetLeft();
- HInstruction* right = add->GetRight();
- if (left->IsArrayLength() && right->IsIntConstant()) {
- return ValueBound::Create(left, right->AsIntConstant()->GetValue());
- }
- }
-
- // No useful bound detected.
- return ValueBound::Max();
- }
-
// Narrow the value range of 'instruction' at the end of 'basic_block' with 'range',
// and push the narrowed value range to 'successor'.
void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
@@ -462,9 +470,8 @@ class BCEVisitor : public HGraphVisitor {
// There should be no critical edge at this point.
DCHECK_EQ(false_successor->GetPredecessors().Size(), 1u);
- ValueBound bound = DetectValueBoundFromValue(right);
- bool found = !bound.Equals(ValueBound::Max());
-
+ bool found;
+ ValueBound bound = ValueBound::DetectValueBoundFromValue(right, &found);
ValueBound lower = bound;
ValueBound upper = bound;
if (!found) {
@@ -484,9 +491,10 @@ class BCEVisitor : public HGraphVisitor {
if (cond == kCondLT || cond == kCondLE) {
if (!upper.Equals(ValueBound::Max())) {
int compensation = (cond == kCondLT) ? -1 : 0; // upper bound is inclusive
- ValueBound new_upper = upper.Add(compensation, false, &overflow_or_underflow);
- // overflow_or_underflow is ignored here since we already use ValueBound::Min()
- // for lower bound.
+ ValueBound new_upper = upper.Add(compensation, &overflow_or_underflow);
+ if (overflow_or_underflow) {
+ new_upper = ValueBound::Max();
+ }
ValueRange* new_range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, true_successor, new_range);
@@ -495,9 +503,10 @@ class BCEVisitor : public HGraphVisitor {
// array.length as a lower bound isn't considered useful.
if (!lower.Equals(ValueBound::Min()) && !lower.IsRelativeToArrayLength()) {
int compensation = (cond == kCondLE) ? 1 : 0; // lower bound is inclusive
- ValueBound new_lower = lower.Add(compensation, true, &overflow_or_underflow);
- // overflow_or_underflow is ignored here since we already use ValueBound::Max()
- // for upper bound.
+ ValueBound new_lower = lower.Add(compensation, &overflow_or_underflow);
+ if (overflow_or_underflow) {
+ new_lower = ValueBound::Min();
+ }
ValueRange* new_range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, false_successor, new_range);
@@ -506,9 +515,10 @@ class BCEVisitor : public HGraphVisitor {
// array.length as a lower bound isn't considered useful.
if (!lower.Equals(ValueBound::Min()) && !lower.IsRelativeToArrayLength()) {
int compensation = (cond == kCondGT) ? 1 : 0; // lower bound is inclusive
- ValueBound new_lower = lower.Add(compensation, true, &overflow_or_underflow);
- // overflow_or_underflow is ignored here since we already use ValueBound::Max()
- // for upper bound.
+ ValueBound new_lower = lower.Add(compensation, &overflow_or_underflow);
+ if (overflow_or_underflow) {
+ new_lower = ValueBound::Min();
+ }
ValueRange* new_range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), new_lower, ValueBound::Max());
ApplyRangeFromComparison(left, block, true_successor, new_range);
@@ -516,9 +526,10 @@ class BCEVisitor : public HGraphVisitor {
if (!upper.Equals(ValueBound::Max())) {
int compensation = (cond == kCondGE) ? -1 : 0; // upper bound is inclusive
- ValueBound new_upper = upper.Add(compensation, false, &overflow_or_underflow);
- // overflow_or_underflow is ignored here since we already use ValueBound::Min()
- // for lower bound.
+ ValueBound new_upper = upper.Add(compensation, &overflow_or_underflow);
+ if (overflow_or_underflow) {
+ new_upper = ValueBound::Max();
+ }
ValueRange* new_range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper);
ApplyRangeFromComparison(left, block, false_successor, new_range);
@@ -533,8 +544,8 @@ class BCEVisitor : public HGraphVisitor {
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr) {
- ValueBound lower = ValueBound::Create(nullptr, 0); // constant 0
- ValueBound upper = ValueBound::Create(array_length, -1); // array_length - 1
+ ValueBound lower = ValueBound(nullptr, 0); // constant 0
+ ValueBound upper = ValueBound(array_length, -1); // array_length - 1
ValueRange* array_range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), lower, upper);
if (index_range->FitsIn(array_range)) {
@@ -555,7 +566,7 @@ class BCEVisitor : public HGraphVisitor {
}
// Once we have an array access like 'array[5] = 1', we record array.length >= 6.
- ValueBound lower = ValueBound::Create(nullptr, constant + 1);
+ ValueBound lower = ValueBound(nullptr, constant + 1);
ValueBound upper = ValueBound::Max();
ValueRange* range = new (GetGraph()->GetArena())
ValueRange(GetGraph()->GetArena(), lower, upper);
@@ -584,18 +595,35 @@ class BCEVisitor : public HGraphVisitor {
if (left == phi && right->IsIntConstant()) {
HInstruction* initial_value = phi->InputAt(0);
ValueRange* range = nullptr;
- if (right->AsIntConstant()->GetValue() == 0) {
+ int increment = right->AsIntConstant()->GetValue();
+ if (increment == 0) {
// Add constant 0. It's really a fixed value.
range = new (GetGraph()->GetArena()) ValueRange(
GetGraph()->GetArena(),
- ValueBound::Create(initial_value, 0),
- ValueBound::Create(initial_value, 0));
+ ValueBound(initial_value, 0),
+ ValueBound(initial_value, 0));
} else {
// Monotonically increasing/decreasing.
- range = MonotonicValueRange::Create(
+ bool found;
+ ValueBound bound = ValueBound::DetectValueBoundFromValue(
+ initial_value, &found);
+ if (!found) {
+ // No constant or array.length+c bound found.
+ // For i=j, we can still use j's upper bound as i's upper bound.
+ // Same for lower.
+ ValueRange* initial_range = LookupValueRange(initial_value, phi->GetBlock());
+ if (initial_range != nullptr) {
+ bound = increment > 0 ? initial_range->GetLower() :
+ initial_range->GetUpper();
+ } else {
+ bound = increment > 0 ? ValueBound::Min() : ValueBound::Max();
+ }
+ }
+ range = new (GetGraph()->GetArena()) MonotonicValueRange(
GetGraph()->GetArena(),
initial_value,
- right->AsIntConstant()->GetValue());
+ increment,
+ bound);
}
GetValueRangeMap(phi->GetBlock())->Overwrite(phi->GetId(), range);
}
@@ -662,8 +690,8 @@ class BCEVisitor : public HGraphVisitor {
// gets [-c2, array.length - c1] as its value range.
ValueRange* range = new (GetGraph()->GetArena()) ValueRange(
GetGraph()->GetArena(),
- ValueBound::Create(nullptr, - upper.GetConstant()),
- ValueBound::Create(array_length, - lower.GetConstant()));
+ ValueBound(nullptr, - upper.GetConstant()),
+ ValueBound(array_length, - lower.GetConstant()));
GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range);
}
}
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0a3f830247..f9054e0133 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -224,44 +224,48 @@ void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
current_block_ = nullptr;
}
-static bool ShouldSkipCompilation(const CompilerDriver& compiler_driver,
- const DexCompilationUnit& dex_compilation_unit,
- size_t number_of_dex_instructions,
- size_t number_of_blocks ATTRIBUTE_UNUSED,
- size_t number_of_branches) {
- const CompilerOptions& compiler_options = compiler_driver.GetCompilerOptions();
+void HGraphBuilder::MaybeRecordStat(MethodCompilationStat compilation_stat) {
+ if (compilation_stats_ != nullptr) {
+ compilation_stats_->RecordStat(compilation_stat);
+ }
+}
+
+bool HGraphBuilder::SkipCompilation(size_t number_of_dex_instructions,
+ size_t number_of_blocks ATTRIBUTE_UNUSED,
+ size_t number_of_branches) {
+ const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerOptions::kEverything) {
return false;
}
if (compiler_options.IsHugeMethod(number_of_dex_instructions)) {
- LOG(INFO) << "Skip compilation of huge method "
- << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
- *dex_compilation_unit.GetDexFile())
- << ": " << number_of_dex_instructions << " dex instructions";
+ VLOG(compiler) << "Skip compilation of huge method "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << ": " << number_of_dex_instructions << " dex instructions";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledHugeMethod);
return true;
}
// If it's large and contains no branches, it's likely to be machine generated initialization.
if (compiler_options.IsLargeMethod(number_of_dex_instructions) && (number_of_branches == 0)) {
- LOG(INFO) << "Skip compilation of large method with no branch "
- << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
- *dex_compilation_unit.GetDexFile())
- << ": " << number_of_dex_instructions << " dex instructions";
+ VLOG(compiler) << "Skip compilation of large method with no branch "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << ": " << number_of_dex_instructions << " dex instructions";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledLargeMethodNoBranches);
return true;
}
return false;
}
-HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
+HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item, int start_instruction_id) {
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
code_start_ = code_ptr;
// Setup the graph with the entry block and exit block.
- graph_ = new (arena_) HGraph(arena_);
+ graph_ = new (arena_) HGraph(arena_, start_instruction_id);
entry_block_ = new (arena_) HBasicBlock(graph_, 0);
graph_->AddBlock(entry_block_);
exit_block_ = new (arena_) HBasicBlock(graph_, kNoDexPc);
@@ -269,7 +273,7 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
graph_->SetExitBlock(exit_block_);
InitializeLocals(code_item.registers_size_);
- graph_->UpdateMaximumNumberOfOutVRegs(code_item.outs_size_);
+ graph_->SetMaximumNumberOfOutVRegs(code_item.outs_size_);
// Compute the number of dex instructions, blocks, and branches. We will
// check these values against limits given to the compiler.
@@ -283,14 +287,9 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
code_ptr, code_end, &number_of_dex_instructions, &number_of_blocks, &number_of_branches);
// Note that the compiler driver is null when unit testing.
- if (compiler_driver_ != nullptr) {
- if (ShouldSkipCompilation(*compiler_driver_,
- *dex_compilation_unit_,
- number_of_dex_instructions,
- number_of_blocks,
- number_of_branches)) {
- return nullptr;
- }
+ if ((compiler_driver_ != nullptr)
+ && SkipCompilation(number_of_dex_instructions, number_of_blocks, number_of_branches)) {
+ return nullptr;
}
// Also create blocks for catch handlers.
@@ -319,7 +318,9 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// Update the current block if dex_pc starts a new block.
MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) {
+ return nullptr;
+ }
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -593,8 +594,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
if (!compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method)) {
- LOG(INFO) << "Did not compile " << PrettyMethod(method_idx, *dex_file_)
- << " because a method call could not be resolved";
+ VLOG(compiler) << "Did not compile " << PrettyMethod(method_idx, *dex_file_)
+ << " because a method call could not be resolved";
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
return false;
}
DCHECK(optimized_invoke_type != kSuper);
@@ -611,9 +613,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
// Sharpening to kDirect only works if we compile PIC.
DCHECK((optimized_invoke_type == invoke_type) || (optimized_invoke_type != kDirect)
|| compiler_driver_->GetCompilerOptions().GetCompilePic());
- // Treat invoke-direct like static calls for now.
- invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
+ invoke = new (arena_) HInvokeStaticOrDirect(
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
+ optimized_invoke_type);
}
size_t start_index = 0;
@@ -636,6 +638,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
<< " at " << dex_pc;
// We do not implement non sequential register pair.
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledNonSequentialRegPair);
return false;
}
HInstruction* arg = LoadLocal(is_range ? register_index + i : args[i], type);
@@ -664,9 +667,11 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa)));
if (resolved_field.Get() == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
return false;
}
if (resolved_field->IsVolatile()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
return false;
}
@@ -704,40 +709,53 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
- uint32_t storage_index;
- bool is_referrers_class;
- bool is_initialized;
- bool is_volatile;
- MemberOffset field_offset(0u);
- Primitive::Type field_type;
-
- bool fast_path = compiler_driver_->ComputeStaticFieldInfo(field_index,
- dex_compilation_unit_,
- is_put,
- &field_offset,
- &storage_index,
- &is_referrers_class,
- &is_volatile,
- &is_initialized,
- &field_type);
- if (!fast_path) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(
+ dex_compilation_unit_->GetClassLinker()->FindDexCache(*dex_compilation_unit_->GetDexFile())));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
+ Handle<mirror::ArtField> resolved_field(hs.NewHandle(compiler_driver_->ResolveField(
+ soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true)));
+
+ if (resolved_field.Get() == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
+ return false;
+ }
+
+ if (resolved_field->IsVolatile()) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledVolatile);
return false;
}
- if (is_volatile) {
+ Handle<mirror::Class> referrer_class(hs.NewHandle(compiler_driver_->ResolveCompilingMethodsClass(
+ soa, dex_cache, class_loader, outer_compilation_unit_)));
+
+ // The index at which the field's class is stored in the DexCache's type array.
+ uint32_t storage_index;
+ std::pair<bool, bool> pair = compiler_driver_->IsFastStaticField(
+ dex_cache.Get(), referrer_class.Get(), resolved_field.Get(), field_index, &storage_index);
+ bool can_easily_access = is_put ? pair.second : pair.first;
+ if (!can_easily_access) {
return false;
}
- HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, dex_pc);
+ // TODO: find out why this check is needed.
+ bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
+ *outer_compilation_unit_->GetDexFile(), storage_index);
+ bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
+ bool is_referrer_class = (referrer_class.Get() == resolved_field->GetDeclaringClass());
+
+ HLoadClass* constant = new (arena_) HLoadClass(storage_index, is_referrer_class, dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
- if (!is_initialized) {
+ if (!is_initialized && !is_referrer_class) {
cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
+ Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
// We need to keep the class alive before loading the value.
Temporaries temps(graph_);
@@ -745,9 +763,10 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(value->GetType(), field_type);
current_block_->AddInstruction(
- new (arena_) HStaticFieldSet(cls, value, field_type, field_offset));
+ new (arena_) HStaticFieldSet(cls, value, field_type, resolved_field->GetOffset()));
} else {
- current_block_->AddInstruction(new (arena_) HStaticFieldGet(cls, field_type, field_offset));
+ current_block_->AddInstruction(
+ new (arena_) HStaticFieldGet(cls, field_type, resolved_field->GetOffset()));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
return true;
@@ -942,15 +961,20 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
uint32_t dex_pc) {
bool type_known_final;
bool type_known_abstract;
- bool is_referrers_class;
+ // `CanAccessTypeWithoutChecks` will tell whether the method being
+ // built is trying to access its own class, so that the generated
+ // code can optimize for this case. However, the optimization does not
+ // work for inlining, so we use `IsCompilingClass` instead.
+ bool dont_use_is_referrers_class;
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
+ &type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
if (!can_access) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
return false;
}
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, IsCompilingClass(type_index), dex_pc);
current_block_->AddInstruction(cls);
// The class needs a temporary before being used by the type check.
Temporaries temps(graph_);
@@ -967,7 +991,7 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
return true;
}
-bool HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
+void HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc) {
SwitchTable table(instruction, dex_pc, false);
// Value to test against.
@@ -984,10 +1008,9 @@ bool HGraphBuilder::BuildPackedSwitch(const Instruction& instruction, uint32_t d
BuildSwitchCaseHelper(instruction, i, i == num_entries, table, value, starting_key + i - 1,
table.GetEntryAt(i), dex_pc);
}
- return true;
}
-bool HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
+void HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc) {
SwitchTable table(instruction, dex_pc, true);
// Value to test against.
@@ -1001,7 +1024,6 @@ bool HGraphBuilder::BuildSparseSwitch(const Instruction& instruction, uint32_t d
BuildSwitchCaseHelper(instruction, i, i == static_cast<size_t>(num_entries) - 1, table, value,
table.GetEntryAt(i), table.GetEntryAt(i + num_entries), dex_pc);
}
- return true;
}
void HGraphBuilder::BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
@@ -1923,15 +1945,20 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint16_t type_index = instruction.VRegB_21c();
bool type_known_final;
bool type_known_abstract;
- bool is_referrers_class;
+ bool dont_use_is_referrers_class;
+ // `CanAccessTypeWithoutChecks` will tell whether the method being
+ // built is trying to access its own class, so that the generated
+ // code can optimize for this case. However, the optimization does not
+ // work for inlining, so we use `IsCompilingClass` instead.
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
+ &type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
if (!can_access) {
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc));
+ new (arena_) HLoadClass(type_index, IsCompilingClass(type_index), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1989,20 +2016,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::PACKED_SWITCH: {
- if (!BuildPackedSwitch(instruction, dex_pc)) {
- return false;
- }
+ BuildPackedSwitch(instruction, dex_pc);
break;
}
case Instruction::SPARSE_SWITCH: {
- if (!BuildSparseSwitch(instruction, dex_pc)) {
- return false;
- }
+ BuildSparseSwitch(instruction, dex_pc);
break;
}
default:
+ VLOG(compiler) << "Did not compile "
+ << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_)
+ << " because of unhandled instruction "
+ << instruction.Name();
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledUnhandledInstruction);
return false;
}
return true;
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 73c2f50958..cc5f6a04dc 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -21,6 +21,7 @@
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
+#include "optimizing_compiler_stats.h"
#include "primitive.h"
#include "utils/arena_object.h"
#include "utils/growable_array.h"
@@ -35,8 +36,10 @@ class HGraphBuilder : public ValueObject {
public:
HGraphBuilder(ArenaAllocator* arena,
DexCompilationUnit* dex_compilation_unit,
+ const DexCompilationUnit* const outer_compilation_unit,
const DexFile* dex_file,
- CompilerDriver* driver)
+ CompilerDriver* driver,
+ OptimizingCompilerStats* compiler_stats)
: arena_(arena),
branch_targets_(arena, 0),
locals_(arena, 0),
@@ -49,9 +52,11 @@ class HGraphBuilder : public ValueObject {
dex_file_(dex_file),
dex_compilation_unit_(dex_compilation_unit),
compiler_driver_(driver),
+ outer_compilation_unit_(outer_compilation_unit),
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
- latest_result_(nullptr) {}
+ latest_result_(nullptr),
+ compilation_stats_(compiler_stats) {}
// Only for unit testing.
HGraphBuilder(ArenaAllocator* arena, Primitive::Type return_type = Primitive::kPrimInt)
@@ -67,11 +72,13 @@ class HGraphBuilder : public ValueObject {
dex_file_(nullptr),
dex_compilation_unit_(nullptr),
compiler_driver_(nullptr),
+ outer_compilation_unit_(nullptr),
return_type_(return_type),
code_start_(nullptr),
- latest_result_(nullptr) {}
+ latest_result_(nullptr),
+ compilation_stats_(nullptr) {}
- HGraph* BuildGraph(const DexFile::CodeItem& code);
+ HGraph* BuildGraph(const DexFile::CodeItem& code, int start_instruction_id = 0);
private:
// Analyzes the dex instruction and adds HInstruction to the graph
@@ -205,16 +212,30 @@ class HGraphBuilder : public ValueObject {
uint32_t dex_pc);
// Builds an instruction sequence for a packed switch statement.
- bool BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc);
+ void BuildPackedSwitch(const Instruction& instruction, uint32_t dex_pc);
// Builds an instruction sequence for a sparse switch statement.
- bool BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc);
+ void BuildSparseSwitch(const Instruction& instruction, uint32_t dex_pc);
void BuildSwitchCaseHelper(const Instruction& instruction, size_t index,
bool is_last_case, const SwitchTable& table,
HInstruction* value, int32_t case_value_int,
int32_t target_offset, uint32_t dex_pc);
+ bool SkipCompilation(size_t number_of_dex_instructions,
+ size_t number_of_blocks,
+ size_t number_of_branches);
+
+ void MaybeRecordStat(MethodCompilationStat compilation_stat);
+
+ // Returns whether `type_index` points to the outer-most compiling method's class.
+ bool IsCompilingClass(uint16_t type_index) const {
+ uint32_t referrer_index = outer_compilation_unit_->GetDexMethodIndex();
+ const DexFile::MethodId& method_id =
+ outer_compilation_unit_->GetDexFile()->GetMethodId(referrer_index);
+ return method_id.class_idx_ == type_index;
+ }
+
ArenaAllocator* const arena_;
// A list of the size of the dex code holding block information for
@@ -232,9 +253,21 @@ class HGraphBuilder : public ValueObject {
HIntConstant* constant0_;
HIntConstant* constant1_;
+ // The dex file where the method being compiled is.
const DexFile* const dex_file_;
+
+ // The compilation unit of the current method being compiled. Note that
+ // it can be an inlined method.
DexCompilationUnit* const dex_compilation_unit_;
+
CompilerDriver* const compiler_driver_;
+
+ // The compilation unit of the outermost method being compiled. That is the
+ // method being compiled (and not inlined), and potentially inlining other
+ // methods.
+ const DexCompilationUnit* const outer_compilation_unit_;
+
+ // The return type of the method being compiled.
const Primitive::Type return_type_;
// The pointer in the dex file where the instructions of the code item
@@ -245,6 +278,8 @@ class HGraphBuilder : public ValueObject {
// used by move-result instructions.
HInstruction* latest_result_;
+ OptimizingCompilerStats* compilation_stats_;
+
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 461409ddca..6f424ce11d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -565,10 +565,19 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, uint32_t dex_pc) {
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
- } else {
- DCHECK(current->IsIntConstant());
+ } else if (current->IsDoubleConstant()) {
+ int64_t value = bit_cast<double, int64_t>(current->AsDoubleConstant()->GetValue());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, High32Bits(value));
+ ++i;
+ DCHECK_LT(i, environment_size);
+ } else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
+ } else {
+ DCHECK(current->IsFloatConstant());
+ int32_t value = bit_cast<float, int32_t>(current->AsFloatConstant()->GetValue());
+ stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, value);
}
break;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cbe5f0cc6e..002d9d4449 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1171,7 +1171,7 @@ void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
codegen_->GenerateFrameExit();
}
-void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
HandleInvoke(invoke);
}
@@ -1179,7 +1179,7 @@ void CodeGeneratorARM::LoadCurrentMethod(Register reg) {
__ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
// TODO: Implement all kinds of calls:
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a61ef2d4f6..c7517d3abc 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -90,10 +90,12 @@ int ARTRegCodeFromVIXL(int code) {
}
Register XRegisterFrom(Location location) {
+ DCHECK(location.IsRegister());
return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
}
Register WRegisterFrom(Location location) {
+ DCHECK(location.IsRegister());
return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
}
@@ -112,10 +114,12 @@ Register InputRegisterAt(HInstruction* instr, int input_index) {
}
FPRegister DRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister());
return FPRegister::DRegFromCode(location.reg());
}
FPRegister SRegisterFrom(Location location) {
+ DCHECK(location.IsFpuRegister());
return FPRegister::SRegFromCode(location.reg());
}
@@ -133,6 +137,11 @@ FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
instr->InputAt(input_index)->GetType());
}
+CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
+ return IsFPType(type) ? CPURegister(FPRegisterFrom(location, type))
+ : CPURegister(RegisterFrom(location, type));
+}
+
CPURegister OutputCPURegister(HInstruction* instr) {
return IsFPType(instr->GetType()) ? static_cast<CPURegister>(OutputFPRegister(instr))
: static_cast<CPURegister>(OutputRegister(instr));
@@ -266,14 +275,32 @@ class SlowPathCodeARM64 : public SlowPathCode {
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- BoundsCheckSlowPathARM64() {}
+ BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
+ Location index_location,
+ Location length_location)
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
+
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- __ Brk(__LINE__); // TODO: Unimplemented BoundsCheckSlowPathARM64.
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(
+ index_location_, LocationFrom(calling_convention.GetRegisterAt(0)),
+ length_location_, LocationFrom(calling_convention.GetRegisterAt(1)));
+ arm64_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
private:
+ HBoundsCheck* const instruction_;
+ const Location index_location_;
+ const Location length_location_;
+
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
};
@@ -322,7 +349,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
Primitive::Type type = at_->GetType();
- arm64_codegen->MoveHelper(out, calling_convention.GetReturnLocation(type), type);
+ arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
codegen->RestoreLiveRegisters(locations);
@@ -364,7 +391,7 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
arm64_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
Primitive::Type type = instruction_->GetType();
- arm64_codegen->MoveHelper(locations->Out(), calling_convention.GetReturnLocation(type), type);
+ arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
codegen->RestoreLiveRegisters(locations);
__ B(GetExitLabel());
@@ -445,15 +472,51 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
- TypeCheckSlowPathARM64() {}
+ TypeCheckSlowPathARM64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
+ : instruction_(instruction),
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+
__ Bind(GetEntryLabel());
- __ Brk(__LINE__); // TODO: Unimplemented TypeCheckSlowPathARM64.
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ codegen->EmitParallelMoves(
+ class_to_check_, LocationFrom(calling_convention.GetRegisterAt(0)),
+ object_class_, LocationFrom(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ Primitive::Type ret_type = instruction_->GetType();
+ Location ret_loc = calling_convention.GetReturnLocation(ret_type);
+ arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
+
+ codegen->RestoreLiveRegisters(locations);
__ B(GetExitLabel());
}
private:
+ HInstruction* const instruction_;
+ const Location class_to_check_;
+ const Location object_class_;
+ uint32_t dex_pc_;
+
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
};
@@ -487,7 +550,8 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
kNumberOfAllocatableRegisterPairs),
block_labels_(nullptr),
location_builder_(graph, this),
- instruction_visitor_(graph, this) {}
+ instruction_visitor_(graph, this),
+ move_resolver_(graph->GetArena(), this) {}
#undef __
#define __ GetVIXLAssembler()->
@@ -498,6 +562,24 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
+void ParallelMoveResolverARM64::EmitMove(size_t index) {
+ MoveOperands* move = moves_.Get(index);
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+}
+
+void ParallelMoveResolverARM64::EmitSwap(size_t index) {
+ MoveOperands* move = moves_.Get(index);
+ codegen_->SwapLocations(move->GetDestination(), move->GetSource());
+}
+
+void ParallelMoveResolverARM64::RestoreScratch(int reg) {
+ __ Pop(Register(VIXLRegCodeFromART(reg), kXRegSize));
+}
+
+void ParallelMoveResolverARM64::SpillScratch(int reg) {
+ __ Push(Register(VIXLRegCodeFromART(reg), kXRegSize));
+}
+
void CodeGeneratorARM64::GenerateFrameEntry() {
bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
if (do_overflow_check) {
@@ -571,18 +653,18 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- MoveHelper(location, temp_location, type);
+ MoveLocation(location, temp_location, type);
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
if (Is64BitType(type)) {
- MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
} else {
- MoveHelper(location, Location::StackSlot(stack_slot), type);
+ MoveLocation(location, Location::StackSlot(stack_slot), type);
}
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- MoveHelper(location, locations->Out(), type);
+ MoveLocation(location, locations->Out(), type);
}
}
@@ -665,6 +747,30 @@ Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
}
}
+size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
+ Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
+ __ Str(reg, MemOperand(sp, stack_index));
+ return kArm64WordSize;
+}
+
+size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
+ Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
+ __ Ldr(reg, MemOperand(sp, stack_index));
+ return kArm64WordSize;
+}
+
+size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ FPRegister reg = FPRegister(reg_id, kDRegSize);
+ __ Str(reg, MemOperand(sp, stack_index));
+ return kArm64WordSize;
+}
+
+size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
+ FPRegister reg = FPRegister(reg_id, kDRegSize);
+ __ Ldr(reg, MemOperand(sp, stack_index));
+ return kArm64WordSize;
+}
+
void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
}
@@ -686,58 +792,162 @@ void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* consta
}
}
-void CodeGeneratorARM64::MoveHelper(Location destination,
- Location source,
- Primitive::Type type) {
+
+static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
+ DCHECK(constant.IsConstant());
+ HConstant* cst = constant.GetConstant();
+ return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
+ (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
+ (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
+ (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
+}
+
+void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- Register dst = RegisterFrom(destination, type);
- if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
- DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
- __ Ldr(dst, StackOperandFrom(source));
- } else {
- __ Mov(dst, OperandFrom(source, type));
+
+ // A valid move can always be inferred from the destination and source
+ // locations. When moving from and to a register, the argument type can be
+ // used to generate 32bit instead of 64bit moves. In debug mode we also
+ // checks the coherency of the locations and the type.
+ bool unspecified_type = (type == Primitive::kPrimVoid);
+
+ if (destination.IsRegister() || destination.IsFpuRegister()) {
+ if (unspecified_type) {
+ HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
+ if (source.IsStackSlot() ||
+ (src_cst != nullptr && (src_cst->IsIntConstant() || src_cst->IsFloatConstant()))) {
+ // For stack slots and 32bit constants, a 64bit type is appropriate.
+ type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ } else {
+ // If the source is a double stack slot or a 64bit constant, a 64bit
+ // type is appropriate. Else the source is a register, and since the
+ // type has not been specified, we chose a 64bit type to force a 64bit
+ // move.
+ type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ }
}
- } else if (destination.IsFpuRegister()) {
- FPRegister dst = FPRegisterFrom(destination, type);
+ DCHECK((destination.IsFpuRegister() && IsFPType(type)) ||
+ (destination.IsRegister() && !IsFPType(type)));
+ CPURegister dst = CPURegisterFrom(destination, type);
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
__ Ldr(dst, StackOperandFrom(source));
- } else if (source.IsFpuRegister()) {
- __ Fmov(dst, FPRegisterFrom(source, type));
- } else {
+ } else if (source.IsConstant()) {
+ DCHECK(CoherentConstantAndType(source, type));
MoveConstant(dst, source.GetConstant());
+ } else {
+ if (destination.IsRegister()) {
+ __ Mov(Register(dst), RegisterFrom(source, type));
+ } else {
+ __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+ }
}
- } else {
+
+ } else { // The destination is not a register. It must be a stack slot.
DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
- } else if (source.IsFpuRegister()) {
- __ Str(FPRegisterFrom(source, type), StackOperandFrom(destination));
+ if (source.IsRegister() || source.IsFpuRegister()) {
+ if (unspecified_type) {
+ if (source.IsRegister()) {
+ type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ } else {
+ type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ }
+ }
+ DCHECK((destination.IsDoubleStackSlot() == Is64BitType(type)) &&
+ (source.IsFpuRegister() == IsFPType(type)));
+ __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
+ DCHECK(unspecified_type || CoherentConstantAndType(source, type));
UseScratchRegisterScope temps(GetVIXLAssembler());
- HConstant* cst = source.GetConstant();
+ HConstant* src_cst = source.GetConstant();
CPURegister temp;
- if (cst->IsIntConstant() || cst->IsLongConstant()) {
- temp = cst->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ if (src_cst->IsIntConstant()) {
+ temp = temps.AcquireW();
+ } else if (src_cst->IsLongConstant()) {
+ temp = temps.AcquireX();
+ } else if (src_cst->IsFloatConstant()) {
+ temp = temps.AcquireS();
} else {
- DCHECK(cst->IsFloatConstant() || cst->IsDoubleConstant());
- temp = cst->IsFloatConstant() ? temps.AcquireS() : temps.AcquireD();
+ DCHECK(src_cst->IsDoubleConstant());
+ temp = temps.AcquireD();
}
- MoveConstant(temp, cst);
+ MoveConstant(temp, src_cst);
__ Str(temp, StackOperandFrom(destination));
} else {
DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
+ DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ // There is generally less pressure on FP registers.
+ FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
__ Ldr(temp, StackOperandFrom(source));
__ Str(temp, StackOperandFrom(destination));
}
}
}
+void CodeGeneratorARM64::SwapLocations(Location loc1, Location loc2) {
+ DCHECK(!loc1.IsConstant());
+ DCHECK(!loc2.IsConstant());
+
+ if (loc1.Equals(loc2)) {
+ return;
+ }
+
+ UseScratchRegisterScope temps(GetAssembler()->vixl_masm_);
+
+ bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
+ bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
+ bool is_fp_reg1 = loc1.IsFpuRegister();
+ bool is_fp_reg2 = loc2.IsFpuRegister();
+
+ if (loc2.IsRegister() && loc1.IsRegister()) {
+ Register r1 = XRegisterFrom(loc1);
+ Register r2 = XRegisterFrom(loc2);
+ Register tmp = temps.AcquireSameSizeAs(r1);
+ __ Mov(tmp, r2);
+ __ Mov(r2, r1);
+ __ Mov(r1, tmp);
+ } else if (is_fp_reg2 && is_fp_reg1) {
+ FPRegister r1 = DRegisterFrom(loc1);
+ FPRegister r2 = DRegisterFrom(loc2);
+ FPRegister tmp = temps.AcquireSameSizeAs(r1);
+ __ Fmov(tmp, r2);
+ __ Fmov(r2, r1);
+ __ Fmov(r1, tmp);
+ } else if (is_slot1 != is_slot2) {
+ MemOperand mem = StackOperandFrom(is_slot1 ? loc1 : loc2);
+ Location reg_loc = is_slot1 ? loc2 : loc1;
+ CPURegister reg, tmp;
+ if (reg_loc.IsFpuRegister()) {
+ reg = DRegisterFrom(reg_loc);
+ tmp = temps.AcquireD();
+ } else {
+ reg = XRegisterFrom(reg_loc);
+ tmp = temps.AcquireX();
+ }
+ __ Ldr(tmp, mem);
+ __ Str(reg, mem);
+ if (reg_loc.IsFpuRegister()) {
+ __ Fmov(FPRegister(reg), FPRegister(tmp));
+ } else {
+ __ Mov(Register(reg), Register(tmp));
+ }
+ } else if (is_slot1 && is_slot2) {
+ MemOperand mem1 = StackOperandFrom(loc1);
+ MemOperand mem2 = StackOperandFrom(loc2);
+ Register tmp1 = loc1.IsStackSlot() ? temps.AcquireW() : temps.AcquireX();
+ Register tmp2 = temps.AcquireSameSizeAs(tmp1);
+ __ Ldr(tmp1, mem1);
+ __ Ldr(tmp2, mem2);
+ __ Str(tmp1, mem2);
+ __ Str(tmp2, mem1);
+ } else {
+ LOG(FATAL) << "Unimplemented";
+ }
+}
+
void CodeGeneratorARM64::Load(Primitive::Type type,
vixl::CPURegister dst,
const vixl::MemOperand& src) {
@@ -850,7 +1060,7 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
- M(ParallelMove) \
+ /* No unimplemented IR. */
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
@@ -1113,7 +1323,9 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
}
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64();
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
+ instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
__ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
@@ -1125,22 +1337,24 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
instruction, LocationSummary::kCallOnSlowPath);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
- UseScratchRegisterScope temps(GetVIXLAssembler());
+ LocationSummary* locations = instruction->GetLocations();
Register obj = InputRegisterAt(instruction, 0);;
Register cls = InputRegisterAt(instruction, 1);;
- Register temp = temps.AcquireW();
+ Register obj_cls = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
- SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
+ instruction, locations->InAt(1), LocationFrom(obj_cls), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
// TODO: avoid this check if we know obj is not null.
__ Cbz(obj, slow_path->GetExitLabel());
// Compare the class of `obj` with `cls`.
- __ Ldr(temp, HeapOperand(obj, mirror::Object::ClassOffset()));
- __ Cmp(temp, cls);
+ __ Ldr(obj_cls, HeapOperand(obj, mirror::Object::ClassOffset()));
+ __ Cmp(obj_cls, cls);
__ B(ne, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -1316,12 +1530,20 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
codegen_->AddSlowPath(slow_path);
Location value = instruction->GetLocations()->InAt(0);
+ Primitive::Type type = instruction->GetType();
+
+ if ((type != Primitive::kPrimInt) && (type != Primitive::kPrimLong)) {
+ LOG(FATAL) << "Unexpected type " << type << "for DivZeroCheck.";
+ return;
+ }
+
if (value.IsConstant()) {
int64_t divisor = Int64ConstantFrom(value);
if (divisor == 0) {
__ B(slow_path->GetEntryLabel());
} else {
- LOG(FATAL) << "Divisions by non-null constants should have been optimized away.";
+ // A division by a non-null constant is valid. We don't need to perform
+ // any check, so simply fall through.
}
} else {
__ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
@@ -1496,7 +1718,8 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
SlowPathCodeARM64* slow_path =
- new (GetGraph()->GetArena()) TypeCheckSlowPathARM64();
+ new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -1573,11 +1796,11 @@ void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
HandleInvoke(invoke);
}
-void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
HandleInvoke(invoke);
}
-void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
// Make sure that ArtMethod* is passed in W0 as per the calling convention
DCHECK(temp.Is(w0));
@@ -1914,6 +2137,14 @@ void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
HandleBinaryOp(instruction);
}
+void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
+ codegen_->GetMoveResolver()->EmitNativeCode(instruction);
+}
+
void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
@@ -1989,7 +2220,7 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
UNUSED(instruction);
codegen_->GenerateFrameExit();
- __ Br(lr);
+ __ Ret();
}
void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
@@ -1999,7 +2230,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
UNUSED(instruction);
codegen_->GenerateFrameExit();
- __ Br(lr);
+ __ Ret();
}
void LocationsBuilderARM64::VisitShl(HShl* shl) {
@@ -2157,17 +2388,18 @@ void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* convers
if (IsIntegralType(result_type) && IsIntegralType(input_type)) {
int result_size = Primitive::ComponentSize(result_type);
int input_size = Primitive::ComponentSize(input_type);
- int min_size = kBitsPerByte * std::min(result_size, input_size);
+ int min_size = std::min(result_size, input_size);
Register output = OutputRegister(conversion);
Register source = InputRegisterAt(conversion, 0);
- if ((result_type == Primitive::kPrimChar) ||
- ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
- __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size);
+ if ((result_type == Primitive::kPrimChar) && (input_size < result_size)) {
+ __ Ubfx(output, source, 0, result_size * kBitsPerByte);
+ } else if ((result_type == Primitive::kPrimChar) ||
+ ((input_type == Primitive::kPrimChar) && (result_size > input_size))) {
+ __ Ubfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
} else {
- __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size);
+ __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
}
} else if (IsFPType(result_type) && IsIntegralType(input_type)) {
- CHECK(input_type == Primitive::kPrimInt || input_type == Primitive::kPrimLong);
__ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
} else if (IsIntegralType(result_type) && IsFPType(input_type)) {
CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 0e3d25f9aa..1d5bfb734e 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -139,6 +139,27 @@ class LocationsBuilderARM64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
};
+class ParallelMoveResolverARM64 : public ParallelMoveResolver {
+ public:
+ ParallelMoveResolverARM64(ArenaAllocator* allocator, CodeGeneratorARM64* codegen)
+ : ParallelMoveResolver(allocator), codegen_(codegen) {}
+
+ void EmitMove(size_t index) OVERRIDE;
+ void EmitSwap(size_t index) OVERRIDE;
+ void RestoreScratch(int reg) OVERRIDE;
+ void SpillScratch(int reg) OVERRIDE;
+
+ private:
+ Arm64Assembler* GetAssembler() const;
+ vixl::MacroAssembler* GetVIXLAssembler() const {
+ return GetAssembler()->vixl_masm_;
+ }
+
+ CodeGeneratorARM64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverARM64);
+};
+
class CodeGeneratorARM64 : public CodeGenerator {
public:
explicit CodeGeneratorARM64(HGraph* graph);
@@ -193,19 +214,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
- UNUSED(stack_index);
- UNUSED(reg_id);
- LOG(INFO) << "CodeGeneratorARM64::SaveCoreRegister()";
- return kArm64WordSize;
- }
-
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
- UNUSED(stack_index);
- UNUSED(reg_id);
- LOG(INFO) << "CodeGeneratorARM64::RestoreCoreRegister()";
- return kArm64WordSize;
- }
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
// The number of registers that can be allocated. The register allocator may
// decide to reserve and not use a few of them.
@@ -237,7 +249,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
- void MoveHelper(Location destination, Location source, Primitive::Type type);
+ // The type is optional. When specified it must be coherent with the
+ // locations, and is used for optimisation and debugging.
+ void MoveLocation(Location destination, Location source,
+ Primitive::Type type = Primitive::kPrimVoid);
+ void SwapLocations(Location loc_1, Location loc_2);
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadCurrentMethod(vixl::Register current_method);
@@ -245,10 +261,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Generate code to invoke a runtime entry point.
void InvokeRuntime(int32_t offset, HInstruction* instruction, uint32_t dex_pc);
- ParallelMoveResolver* GetMoveResolver() OVERRIDE {
- UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
- return nullptr;
- }
+ ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
private:
// Labels for each block that will be compiled.
@@ -256,11 +269,16 @@ class CodeGeneratorARM64 : public CodeGenerator {
LocationsBuilderARM64 location_builder_;
InstructionCodeGeneratorARM64 instruction_visitor_;
+ ParallelMoveResolverARM64 move_resolver_;
Arm64Assembler assembler_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
+inline Arm64Assembler* ParallelMoveResolverARM64::GetAssembler() const {
+ return codegen_->GetAssembler();
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8a0c2deab9..e7edd8a805 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1115,11 +1115,11 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
__ ret();
}
-void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
HandleInvoke(invoke);
}
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
// TODO: Implement all kinds of calls:
@@ -2391,7 +2391,7 @@ void InstructionCodeGeneratorX86::HandleShift(HBinaryOperation* op) {
__ shrl(first_reg, second_reg);
}
} else {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue);
if (op->IsShl()) {
__ shll(first_reg, imm);
} else if (op->IsShr()) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 233f4a4e4b..ff7fcdcbac 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1122,11 +1122,11 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
return Location();
}
-void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
HandleInvoke(invoke);
}
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -2223,7 +2223,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) {
__ shrl(first_reg, second_reg);
}
} else {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxIntShiftValue);
if (op->IsShl()) {
__ shll(first_reg, imm);
} else if (op->IsShr()) {
@@ -2245,7 +2245,7 @@ void InstructionCodeGeneratorX86_64::HandleShift(HBinaryOperation* op) {
__ shrq(first_reg, second_reg);
}
} else {
- Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue() & kMaxLongShiftValue);
if (op->IsShl()) {
__ shlq(first_reg, imm);
} else if (op->IsShr()) {
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index fee3ea6f8c..8b75cc7c65 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -129,12 +129,15 @@ static void RunCodeOptimized(HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
Expected expected) {
- if (kRuntimeISA == kX86) {
- x86::CodeGeneratorX86 codegenX86(graph);
- RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
- } else if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
+ if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
arm::CodeGeneratorARM codegenARM(graph);
RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kArm64) {
+ arm64::CodeGeneratorARM64 codegenARM64(graph);
+ RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kX86) {
+ x86::CodeGeneratorX86 codegenX86(graph);
+ RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
} else if (kRuntimeISA == kX86_64) {
x86_64::CodeGeneratorX86_64 codegenX86_64(graph);
RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
@@ -362,11 +365,7 @@ NOT_LONG_TEST(ReturnNotLongINT64_MAX,
#undef NOT_LONG_TEST
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_IntToLongOfLongToInt) {
-#else
TEST(CodegenTest, IntToLongOfLongToInt) {
-#endif
const int64_t input = INT64_C(4294967296); // 2^32
const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -493,10 +492,8 @@ TEST(CodegenTest, NonMaterializedCondition) {
TestCode(data, true, 12); \
}
-#if !defined(__aarch64__)
MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
-#endif
TEST(CodegenTest, ReturnMulIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
@@ -633,11 +630,7 @@ TEST(CodegenTest, MaterializedCondition2) {
}
}
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnDivIntLit8) {
-#else
TEST(CodegenTest, ReturnDivIntLit8) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::DIV_INT_LIT8, 3 << 8 | 0,
@@ -646,11 +639,7 @@ TEST(CodegenTest, ReturnDivIntLit8) {
TestCode(data, true, 1);
}
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnDivInt2Addr) {
-#else
TEST(CodegenTest, ReturnDivInt2Addr) {
-#endif
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::CONST_4 | 2 << 12 | 1 << 8,
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index a56b9d9a12..cad6683577 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -38,8 +38,7 @@ static void TestCode(const uint16_t* data,
HGraph* graph = CreateCFG(&allocator, data, return_type);
ASSERT_NE(graph, nullptr);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
+ graph->TryBuildingSsa();
StringPrettyPrinter printer_before(graph);
printer_before.VisitInsertionOrder();
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 5d4b9cb024..3dbd04e250 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -32,8 +32,7 @@ static void TestCode(const uint16_t* data,
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
+ graph->TryBuildingSsa();
StringPrettyPrinter printer_before(graph);
printer_before.VisitInsertionOrder();
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 39def82007..923468ff16 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -62,7 +62,7 @@ static void TestCodeSSA(const uint16_t* data) {
ASSERT_NE(graph, nullptr);
graph->BuildDominatorTree();
- graph->TransformToSSA();
+ graph->TransformToSsa();
SSAChecker ssa_checker(&allocator, graph);
ssa_checker.Run();
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 4ed2156241..5d1703e237 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -17,7 +17,6 @@
#include "graph_visualizer.h"
#include "code_generator.h"
-#include "driver/dex_compilation_unit.h"
#include "nodes.h"
#include "ssa_liveness_analysis.h"
@@ -270,39 +269,20 @@ HGraphVisualizer::HGraphVisualizer(std::ostream* output,
HGraph* graph,
const char* string_filter,
const CodeGenerator& codegen,
- const DexCompilationUnit& cu)
+ const char* method_name)
: output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
if (output == nullptr) {
return;
}
- std::string pretty_name = PrettyMethod(cu.GetDexMethodIndex(), *cu.GetDexFile());
- if (pretty_name.find(string_filter) == std::string::npos) {
+ if (strstr(method_name, string_filter) == nullptr) {
return;
}
is_enabled_ = true;
HGraphVisualizerPrinter printer(graph, *output_, "", codegen_);
printer.StartTag("compilation");
- printer.PrintProperty("name", pretty_name.c_str());
- printer.PrintProperty("method", pretty_name.c_str());
- printer.PrintTime("date");
- printer.EndTag("compilation");
-}
-
-HGraphVisualizer::HGraphVisualizer(std::ostream* output,
- HGraph* graph,
- const CodeGenerator& codegen,
- const char* name)
- : output_(output), graph_(graph), codegen_(codegen), is_enabled_(false) {
- if (output == nullptr) {
- return;
- }
-
- is_enabled_ = true;
- HGraphVisualizerPrinter printer(graph, *output_, "", codegen_);
- printer.StartTag("compilation");
- printer.PrintProperty("name", name);
- printer.PrintProperty("method", name);
+ printer.PrintProperty("name", method_name);
+ printer.PrintProperty("method", method_name);
printer.PrintTime("date");
printer.EndTag("compilation");
}
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 60d996ba88..b5baed9c99 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -47,16 +47,7 @@ class HGraphVisualizer : public ValueObject {
HGraph* graph,
const char* string_filter,
const CodeGenerator& codegen,
- const DexCompilationUnit& cu);
-
- /**
- * Version of `HGraphVisualizer` for unit testing, that is when a
- * `DexCompilationUnit` is not available.
- */
- HGraphVisualizer(std::ostream* output,
- HGraph* graph,
- const CodeGenerator& codegen,
- const char* name);
+ const char* method_name);
/**
* If this visualizer is enabled, emit the compilation information
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index a6a68ca59d..94ff192264 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -59,8 +59,7 @@ TEST(GVNTest, LocalFieldElimination) {
ASSERT_EQ(different_offset->GetBlock(), block);
ASSERT_EQ(use_after_kill->GetBlock(), block);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
+ graph->TryBuildingSsa();
GlobalValueNumberer(&allocator, graph).Run();
ASSERT_TRUE(to_remove->GetBlock() == nullptr);
@@ -108,8 +107,7 @@ TEST(GVNTest, GlobalFieldElimination) {
new (&allocator) HInstanceFieldGet(parameter, Primitive::kPrimBoolean, MemberOffset(42)));
join->AddInstruction(new (&allocator) HExit());
- graph->BuildDominatorTree();
- graph->TransformToSSA();
+ graph->TryBuildingSsa();
GlobalValueNumberer(&allocator, graph).Run();
// Check that all field get instructions have been GVN'ed.
@@ -173,9 +171,7 @@ TEST(GVNTest, LoopFieldElimination) {
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
ASSERT_EQ(field_get_in_exit->GetBlock(), exit);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
GlobalValueNumberer(&allocator, graph).Run();
// Check that all field get instructions are still there.
@@ -237,9 +233,7 @@ TEST(GVNTest, LoopSideEffects) {
inner_loop_exit->AddInstruction(new (&allocator) HGoto());
outer_loop_exit->AddInstruction(new (&allocator) HExit());
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
*outer_loop_header->GetLoopInformation()));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
new file mode 100644
index 0000000000..1de5b78121
--- /dev/null
+++ b/compiler/optimizing/inliner.cc
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inliner.h"
+
+#include "builder.h"
+#include "class_linker.h"
+#include "constant_folding.h"
+#include "dead_code_elimination.h"
+#include "driver/compiler_driver-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "instruction_simplifier.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "nodes.h"
+#include "ssa_phi_elimination.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+static constexpr int kMaxInlineCodeUnits = 100;
+static constexpr int kMaxInlineNumberOfBlocks = 3;
+
+void HInliner::Run() {
+ for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+ for (HInstructionIterator instr_it(it.Current()->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ HInvokeStaticOrDirect* current = instr_it.Current()->AsInvokeStaticOrDirect();
+ if (current != nullptr) {
+ if (!TryInline(current, current->GetIndexInDexCache(), current->GetInvokeType())) {
+ if (kIsDebugBuild) {
+ std::string callee_name =
+ PrettyMethod(current->GetIndexInDexCache(), *outer_compilation_unit_.GetDexFile());
+ bool should_inline = callee_name.find("$inline$") != std::string::npos;
+ CHECK(!should_inline) << "Could not inline " << callee_name;
+ }
+ }
+ }
+ }
+ }
+}
+
+bool HInliner::TryInline(HInvoke* invoke_instruction,
+ uint32_t method_index,
+ InvokeType invoke_type) const {
+ ScopedObjectAccess soa(Thread::Current());
+ const DexFile& outer_dex_file = *outer_compilation_unit_.GetDexFile();
+ VLOG(compiler) << "Try inlining " << PrettyMethod(method_index, outer_dex_file);
+
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(outer_compilation_unit_.GetClassLinker()->FindDexCache(outer_dex_file)));
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(outer_compilation_unit_.GetClassLoader())));
+ Handle<mirror::ArtMethod> resolved_method(hs.NewHandle(
+ compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, &outer_compilation_unit_, method_index, invoke_type)));
+
+ if (resolved_method.Get() == nullptr) {
+ VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, outer_dex_file);
+ return false;
+ }
+
+ if (resolved_method->GetDexFile()->GetLocation().compare(outer_dex_file.GetLocation()) != 0) {
+ VLOG(compiler) << "Did not inline "
+ << PrettyMethod(method_index, outer_dex_file)
+ << " because it is in a different dex file";
+ return false;
+ }
+
+ const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+
+ if (code_item == nullptr) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " is not inlined because it is native";
+ return false;
+ }
+
+ if (code_item->insns_size_in_code_units_ > kMaxInlineCodeUnits) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " is too big to inline";
+ return false;
+ }
+
+ if (code_item->tries_size_ != 0) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " is not inlined because of try block";
+ return false;
+ }
+
+ if (!resolved_method->GetDeclaringClass()->IsVerified()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " is not inlined because its class could not be verified";
+ return false;
+ }
+
+ DexCompilationUnit dex_compilation_unit(
+ nullptr,
+ outer_compilation_unit_.GetClassLoader(),
+ outer_compilation_unit_.GetClassLinker(),
+ outer_dex_file,
+ code_item,
+ resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
+ method_index,
+ resolved_method->GetAccessFlags(),
+ nullptr);
+
+ OptimizingCompilerStats inline_stats;
+ HGraphBuilder builder(graph_->GetArena(),
+ &dex_compilation_unit,
+ &outer_compilation_unit_,
+ &outer_dex_file,
+ compiler_driver_,
+ &inline_stats);
+ HGraph* callee_graph = builder.BuildGraph(*code_item, graph_->GetCurrentInstructionId());
+
+ if (callee_graph == nullptr) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " could not be built, so cannot be inlined";
+ return false;
+ }
+
+ if (callee_graph->GetBlocks().Size() > kMaxInlineNumberOfBlocks) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " has too many blocks to be inlined: "
+ << callee_graph->GetBlocks().Size();
+ return false;
+ }
+
+ if (!callee_graph->TryBuildingSsa()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " could not be transformed to SSA";
+ return false;
+ }
+
+ HReversePostOrderIterator it(*callee_graph);
+ it.Advance(); // Past the entry block to avoid seeing the suspend check.
+ for (; !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (block->IsLoopHeader()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " could not be inlined because it contains a loop";
+ return false;
+ }
+
+ for (HInstructionIterator instr_it(block->GetInstructions());
+ !instr_it.Done();
+ instr_it.Advance()) {
+ HInstruction* current = instr_it.Current();
+ if (current->CanThrow()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " could not be inlined because " << current->DebugName()
+ << " can throw";
+ return false;
+ }
+
+ if (current->NeedsEnvironment()) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file)
+ << " could not be inlined because " << current->DebugName()
+ << " needs an environment";
+ return false;
+ }
+ }
+ }
+
+ // Run simple optimizations on the graph.
+ SsaRedundantPhiElimination redundant_phi(callee_graph);
+ SsaDeadPhiElimination dead_phi(callee_graph);
+ HDeadCodeElimination dce(callee_graph);
+ HConstantFolding fold(callee_graph);
+ InstructionSimplifier simplify(callee_graph);
+
+ HOptimization* optimizations[] = {
+ &redundant_phi,
+ &dead_phi,
+ &dce,
+ &fold,
+ &simplify,
+ };
+
+ for (size_t i = 0; i < arraysize(optimizations); ++i) {
+ HOptimization* optimization = optimizations[i];
+ optimization->Run();
+ }
+
+ callee_graph->InlineInto(graph_, invoke_instruction);
+ VLOG(compiler) << "Successfully inlined " << PrettyMethod(method_index, outer_dex_file);
+ outer_stats_->RecordStat(kInlinedInvoke);
+ return true;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
new file mode 100644
index 0000000000..370e33c2a1
--- /dev/null
+++ b/compiler/optimizing/inliner.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INLINER_H_
+#define ART_COMPILER_OPTIMIZING_INLINER_H_
+
+#include "invoke_type.h"
+#include "optimization.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexCompilationUnit;
+class HGraph;
+class HInvoke;
+class OptimizingCompilerStats;
+
+class HInliner : public HOptimization {
+ public:
+ HInliner(HGraph* outer_graph,
+ const DexCompilationUnit& outer_compilation_unit,
+ CompilerDriver* compiler_driver,
+ OptimizingCompilerStats* stats)
+ : HOptimization(outer_graph, true, "inliner"),
+ outer_compilation_unit_(outer_compilation_unit),
+ compiler_driver_(compiler_driver),
+ outer_stats_(stats) {}
+
+ void Run() OVERRIDE;
+
+ private:
+ bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
+
+ const DexCompilationUnit& outer_compilation_unit_;
+ CompilerDriver* const compiler_driver_;
+ OptimizingCompilerStats* const outer_stats_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInliner);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INLINER_H_
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 28ca5e81e6..59404dcb14 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -42,9 +42,7 @@ static void TestCode(const uint16_t* data, const int* expected_order, size_t num
HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
x86::CodeGeneratorX86 codegen(graph);
SsaLivenessAnalysis liveness(*graph, &codegen);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 5c7e6f0325..007c43e218 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -36,9 +36,7 @@ static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
return graph;
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 4b69e57960..6f706c391d 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -48,9 +48,7 @@ static void TestCode(const uint16_t* data, const char* expected) {
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
ASSERT_NE(graph, nullptr);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
x86::CodeGeneratorX86 codegen(graph);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ba4dccf598..fb941b542f 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -167,7 +167,7 @@ void HGraph::VisitBlockForDominatorTree(HBasicBlock* block,
}
}
-void HGraph::TransformToSSA() {
+void HGraph::TransformToSsa() {
DCHECK(!reverse_post_order_.IsEmpty());
SsaBuilder ssa_builder(this);
ssa_builder.BuildSsa();
@@ -682,4 +682,81 @@ std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind&
return os;
}
+void HInstruction::InsertBefore(HInstruction* cursor) {
+ next_->previous_ = previous_;
+ if (previous_ != nullptr) {
+ previous_->next_ = next_;
+ }
+ if (block_->instructions_.first_instruction_ == this) {
+ block_->instructions_.first_instruction_ = next_;
+ }
+
+ previous_ = cursor->previous_;
+ if (previous_ != nullptr) {
+ previous_->next_ = this;
+ }
+ next_ = cursor;
+ cursor->previous_ = this;
+ block_ = cursor->block_;
+}
+
+void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
+ // We currently only support graphs with one entry block, one body block, and one exit block.
+ DCHECK_EQ(GetBlocks().Size(), 3u);
+
+ // Walk over the entry block and:
+ // - Move constants from the entry block to the outer_graph's entry block,
+ // - Replace HParameterValue instructions with their real value.
+ // - Remove suspend checks, that hold an environment.
+ int parameter_index = 0;
+ for (HInstructionIterator it(entry_block_->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsConstant()) {
+ current->InsertBefore(outer_graph->GetEntryBlock()->GetLastInstruction());
+ } else if (current->IsParameterValue()) {
+ current->ReplaceWith(invoke->InputAt(parameter_index++));
+ } else {
+ DCHECK(current->IsGoto() || current->IsSuspendCheck());
+ entry_block_->RemoveInstruction(current);
+ }
+ }
+
+ // Insert the body's instructions except the last, just after the `invoke`
+ // instruction.
+ HBasicBlock* body = GetBlocks().Get(1);
+ DCHECK(!body->IsExitBlock());
+ HInstruction* last = body->GetLastInstruction();
+ HInstruction* first = body->GetFirstInstruction();
+
+ if (first != last) {
+ HInstruction* antelast = last->GetPrevious();
+
+ // Update the instruction list of the body to only contain the last
+ // instruction.
+ last->previous_ = nullptr;
+ body->instructions_.first_instruction_ = last;
+ body->instructions_.last_instruction_ = last;
+
+ // Update the instruction list of the `invoke`'s block to now contain the
+ // body's instructions.
+ antelast->next_ = invoke->GetNext();
+ antelast->next_->previous_ = antelast;
+ first->previous_ = invoke;
+ invoke->next_ = first;
+
+ // Update the block pointer of all instructions.
+ for (HInstruction* current = antelast; current != invoke; current = current->GetPrevious()) {
+ current->SetBlock(invoke->GetBlock());
+ }
+ }
+
+ // Finally, replace the invoke with the return value of the inlined graph.
+ if (last->IsReturn()) {
+ invoke->ReplaceWith(last->InputAt(0));
+ body->RemoveInstruction(last);
+ } else {
+ DCHECK(last->IsReturnVoid());
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 8a25de19d9..c963b70492 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_
+#include "invoke_type.h"
#include "locations.h"
#include "offsets.h"
#include "primitive.h"
@@ -30,6 +31,7 @@ class HBasicBlock;
class HEnvironment;
class HInstruction;
class HIntConstant;
+class HInvoke;
class HGraphVisitor;
class HPhi;
class HSuspendCheck;
@@ -75,6 +77,8 @@ class HInstructionList {
HInstruction* last_instruction_;
friend class HBasicBlock;
+ friend class HGraph;
+ friend class HInstruction;
friend class HInstructionIterator;
friend class HBackwardInstructionIterator;
@@ -84,7 +88,7 @@ class HInstructionList {
// Control-flow graph of a method. Contains a list of basic blocks.
class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
- explicit HGraph(ArenaAllocator* arena)
+ HGraph(ArenaAllocator* arena, int start_instruction_id = 0)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
@@ -94,7 +98,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
number_of_vregs_(0),
number_of_in_vregs_(0),
temporaries_vreg_slots_(0),
- current_instruction_id_(0) {}
+ current_instruction_id_(start_instruction_id) {}
ArenaAllocator* GetArena() const { return arena_; }
const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -108,8 +112,16 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
void AddBlock(HBasicBlock* block);
+ // Try building the SSA form of this graph, with dominance computation and loop
+ // recognition. Returns whether it was successful in doing all these steps.
+ bool TryBuildingSsa() {
+ BuildDominatorTree();
+ TransformToSsa();
+ return AnalyzeNaturalLoops();
+ }
+
void BuildDominatorTree();
- void TransformToSSA();
+ void TransformToSsa();
void SimplifyCFG();
// Analyze all natural loops in this graph. Returns false if one
@@ -117,19 +129,31 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// back edge.
bool AnalyzeNaturalLoops() const;
+ // Inline this graph in `outer_graph`, replacing the given `invoke` instruction.
+ void InlineInto(HGraph* outer_graph, HInvoke* invoke);
+
void SplitCriticalEdge(HBasicBlock* block, HBasicBlock* successor);
void SimplifyLoop(HBasicBlock* header);
- int GetNextInstructionId() {
+ int32_t GetNextInstructionId() {
+ DCHECK_NE(current_instruction_id_, INT32_MAX);
return current_instruction_id_++;
}
+ int32_t GetCurrentInstructionId() const {
+ return current_instruction_id_;
+ }
+
+ void SetCurrentInstructionId(int32_t id) {
+ current_instruction_id_ = id;
+ }
+
uint16_t GetMaximumNumberOfOutVRegs() const {
return maximum_number_of_out_vregs_;
}
- void UpdateMaximumNumberOfOutVRegs(uint16_t new_value) {
- maximum_number_of_out_vregs_ = std::max(new_value, maximum_number_of_out_vregs_);
+ void SetMaximumNumberOfOutVRegs(uint16_t new_value) {
+ maximum_number_of_out_vregs_ = new_value;
}
void UpdateTemporariesVRegSlots(size_t slots) {
@@ -152,10 +176,6 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
number_of_in_vregs_ = value;
}
- uint16_t GetNumberOfInVRegs() const {
- return number_of_in_vregs_;
- }
-
uint16_t GetNumberOfLocalVRegs() const {
return number_of_vregs_ - number_of_in_vregs_;
}
@@ -200,8 +220,9 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
size_t temporaries_vreg_slots_;
// The current id to assign to a newly added instruction. See HInstruction.id_.
- int current_instruction_id_;
+ int32_t current_instruction_id_;
+ ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -474,6 +495,9 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
size_t lifetime_end_;
bool is_catch_block_;
+ friend class HGraph;
+ friend class HInstruction;
+
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
@@ -503,7 +527,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(InvokeInterface, Invoke) \
- M(InvokeStatic, Invoke) \
+ M(InvokeStaticOrDirect, Invoke) \
M(InvokeVirtual, Invoke) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
@@ -748,6 +772,9 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
void ReplaceWith(HInstruction* instruction);
void ReplaceInput(HInstruction* replacement, size_t index);
+ // Insert `this` instruction in `cursor`'s graph, just before `cursor`.
+ void InsertBefore(HInstruction* cursor);
+
bool HasOnlyOneUse() const {
return uses_ != nullptr && uses_->GetTail() == nullptr;
}
@@ -836,6 +863,7 @@ class HInstruction : public ArenaObject<kArenaAllocMisc> {
const SideEffects side_effects_;
friend class HBasicBlock;
+ friend class HGraph;
friend class HInstructionList;
DISALLOW_COPY_AND_ASSIGN(HInstruction);
@@ -1595,24 +1623,28 @@ class HInvoke : public HInstruction {
DISALLOW_COPY_AND_ASSIGN(HInvoke);
};
-class HInvokeStatic : public HInvoke {
+class HInvokeStaticOrDirect : public HInvoke {
public:
- HInvokeStatic(ArenaAllocator* arena,
- uint32_t number_of_arguments,
- Primitive::Type return_type,
- uint32_t dex_pc,
- uint32_t index_in_dex_cache)
+ HInvokeStaticOrDirect(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t index_in_dex_cache,
+ InvokeType invoke_type)
: HInvoke(arena, number_of_arguments, return_type, dex_pc),
- index_in_dex_cache_(index_in_dex_cache) {}
+ index_in_dex_cache_(index_in_dex_cache),
+ invoke_type_(invoke_type) {}
uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
+ InvokeType GetInvokeType() const { return invoke_type_; }
- DECLARE_INSTRUCTION(InvokeStatic);
+ DECLARE_INSTRUCTION(InvokeStaticOrDirect);
private:
const uint32_t index_in_dex_cache_;
+ const InvokeType invoke_type_;
- DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
+ DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};
class HInvokeVirtual : public HInvoke {
@@ -2425,7 +2457,7 @@ class HLoadString : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
-// TODO: Pass this check to HInvokeStatic nodes.
+// TODO: Pass this check to HInvokeStaticOrDirect nodes.
/**
* Performs an initialization check on its Class object input.
*/
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 11fc9bf9b9..deebaf7414 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -30,6 +30,7 @@
#include "elf_writer_quick.h"
#include "graph_visualizer.h"
#include "gvn.h"
+#include "inliner.h"
#include "instruction_simplifier.h"
#include "jni/quick/jni_compiler.h"
#include "mirror/art_method-inl.h"
@@ -121,9 +122,8 @@ class OptimizingCompiler FINAL : public Compiler {
// Whether we should run any optimization or register allocation. If false, will
// just run the code generation after the graph was built.
const bool run_optimizations_;
- mutable AtomicInteger total_compiled_methods_;
- mutable AtomicInteger unoptimized_compiled_methods_;
- mutable AtomicInteger optimized_compiled_methods_;
+
+ mutable OptimizingCompilerStats compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -136,24 +136,14 @@ OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
: Compiler(driver, kMaximumCompilationTimeBeforeWarning),
run_optimizations_(
driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
- total_compiled_methods_(0),
- unoptimized_compiled_methods_(0),
- optimized_compiled_methods_(0) {
+ compilation_stats_() {
if (kIsVisualizerEnabled) {
visualizer_output_.reset(new std::ofstream("art.cfg"));
}
}
OptimizingCompiler::~OptimizingCompiler() {
- if (total_compiled_methods_ == 0) {
- LOG(INFO) << "Did not compile any method.";
- } else {
- size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
- size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
- LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
- << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
- << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
- }
+ compilation_stats_.Log();
}
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
@@ -192,25 +182,33 @@ static bool CanOptimize(const DexFile::CodeItem& code_item) {
return code_item.tries_size_ == 0;
}
-static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer) {
- HDeadCodeElimination opt1(graph);
- HConstantFolding opt2(graph);
- SsaRedundantPhiElimination opt3(graph);
- SsaDeadPhiElimination opt4(graph);
- InstructionSimplifier opt5(graph);
- GVNOptimization opt6(graph);
+static void RunOptimizations(HGraph* graph,
+ CompilerDriver* driver,
+ OptimizingCompilerStats* stats,
+ const DexCompilationUnit& dex_compilation_unit,
+ const HGraphVisualizer& visualizer) {
+ SsaRedundantPhiElimination redundant_phi(graph);
+ SsaDeadPhiElimination dead_phi(graph);
+ HDeadCodeElimination dce(graph);
+ HConstantFolding fold(graph);
+ InstructionSimplifier simplify1(graph);
+
+ HInliner inliner(graph, dex_compilation_unit, driver, stats);
+
+ GVNOptimization gvn(graph);
BoundsCheckElimination bce(graph);
- InstructionSimplifier opt8(graph);
+ InstructionSimplifier simplify2(graph);
HOptimization* optimizations[] = {
- &opt1,
- &opt2,
- &opt3,
- &opt4,
- &opt5,
- &opt6,
+ &redundant_phi,
+ &dead_phi,
+ &dce,
+ &fold,
+ &simplify1,
+ &inliner,
+ &gvn,
&bce,
- &opt8
+ &simplify2
};
for (size_t i = 0; i < arraysize(optimizations); ++i) {
@@ -221,21 +219,15 @@ static void RunOptimizations(HGraph* graph, const HGraphVisualizer& visualizer)
}
}
-static bool TryBuildingSsa(HGraph* graph,
- const DexCompilationUnit& dex_compilation_unit,
- const HGraphVisualizer& visualizer) {
- graph->BuildDominatorTree();
- graph->TransformToSSA();
-
- if (!graph->AnalyzeNaturalLoops()) {
- LOG(INFO) << "Skipping compilation of "
- << PrettyMethod(dex_compilation_unit.GetDexMethodIndex(),
- *dex_compilation_unit.GetDexFile())
- << ": it contains a non natural loop";
- return false;
+// The stack map we generate must be 4-byte aligned on ARM. Since existing
+// maps are generated alongside these stack maps, we must also align them.
+static std::vector<uint8_t>& AlignVectorSize(std::vector<uint8_t>& vector) {
+ size_t size = vector.size();
+ size_t aligned_size = RoundUp(size, 4);
+ for (; size < aligned_size; ++size) {
+ vector.push_back(0);
}
- visualizer.DumpGraph("ssa transform");
- return true;
+ return vector;
}
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
@@ -246,7 +238,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
jobject class_loader,
const DexFile& dex_file) const {
UNUSED(invoke_type);
- total_compiled_methods_++;
+ compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
// overflow checks) assume thumb2.
@@ -256,10 +248,12 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -268,16 +262,23 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
class_def_idx, method_idx, access_flags,
GetCompilerDriver()->GetVerifiedMethod(&dex_file, method_idx));
+ std::string method_name = PrettyMethod(method_idx, dex_file);
+
// For testing purposes, we put a special marker on method names that should be compiled
// with this compiler. This makes sure we're not regressing.
- bool shouldCompile = dex_compilation_unit.GetSymbol().find("00024opt_00024") != std::string::npos;
- bool shouldOptimize =
- dex_compilation_unit.GetSymbol().find("00024reg_00024") != std::string::npos;
+ bool shouldCompile = method_name.find("$opt$") != std::string::npos;
+ bool shouldOptimize = method_name.find("$opt$reg$") != std::string::npos;
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file, GetCompilerDriver());
-
+ HGraphBuilder builder(&arena,
+ &dex_compilation_unit,
+ &dex_compilation_unit,
+ &dex_file,
+ GetCompilerDriver(),
+ &compilation_stats_);
+
+ VLOG(compiler) << "Building " << PrettyMethod(method_idx, dex_file);
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
CHECK(!shouldCompile) << "Could not build graph in optimizing compiler";
@@ -287,25 +288,30 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
if (codegen == nullptr) {
CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
HGraphVisualizer visualizer(
- visualizer_output_.get(), graph, kStringFilter, *codegen, dex_compilation_unit);
+ visualizer_output_.get(), graph, kStringFilter, *codegen, method_name.c_str());
visualizer.DumpGraph("builder");
CodeVectorAllocator allocator;
- if (run_optimizations_
- && CanOptimize(*code_item)
- && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ bool can_optimize = CanOptimize(*code_item);
+ bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
+ if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << PrettyMethod(method_idx, dex_file);
- optimized_compiled_methods_++;
- if (!TryBuildingSsa(graph, dex_compilation_unit, visualizer)) {
+ if (!graph->TryBuildingSsa()) {
+ LOG(INFO) << "Skipping compilation of "
+ << PrettyMethod(method_idx, dex_file)
+ << ": it contains a non natural loop";
// We could not transform the graph to SSA, bailout.
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
return nullptr;
}
- RunOptimizations(graph, visualizer);
+ RunOptimizations(
+ graph, GetCompilerDriver(), &compilation_stats_, dex_compilation_unit, visualizer);
PrepareForRegisterAllocation(graph).Run();
SsaLivenessAnalysis liveness(*graph, codegen);
@@ -318,29 +324,31 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
visualizer.DumpGraph(kRegisterAllocatorPassName);
codegen->CompileOptimized(&allocator);
- std::vector<uint8_t> mapping_table;
- SrcMap src_mapping_table;
- codegen->BuildMappingTable(&mapping_table,
- GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
- &src_mapping_table : nullptr);
-
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledOptimized);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
- mapping_table,
stack_map);
} else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
} else {
VLOG(compiler) << "Compile baseline " << PrettyMethod(method_idx, dex_file);
- unoptimized_compiled_methods_++;
+
+ if (!run_optimizations_) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedDisabled);
+ } else if (!can_optimize) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
+ } else if (!can_allocate_registers) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
+ }
+
codegen->CompileBaseline(&allocator);
std::vector<uint8_t> mapping_table;
@@ -353,6 +361,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::vector<uint8_t> gc_map;
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledBaseline);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
@@ -360,9 +369,9 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
&src_mapping_table,
- mapping_table,
- vmap_table,
- gc_map,
+ AlignVectorSize(mapping_table),
+ AlignVectorSize(vmap_table),
+ AlignVectorSize(gc_map),
nullptr);
}
}
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index a415eca2d0..d076fb55f2 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -24,6 +24,6 @@ class CompilerDriver;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
-}
+} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
new file mode 100644
index 0000000000..7993b19850
--- /dev/null
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
+
+#include <sstream>
+#include <string>
+
+#include "atomic.h"
+
+namespace art {
+
+enum MethodCompilationStat {
+ kAttemptCompilation = 0,
+ kCompiledBaseline,
+ kCompiledOptimized,
+ kInlinedInvoke,
+ kNotCompiledUnsupportedIsa,
+ kNotCompiledPathological,
+ kNotCompiledHugeMethod,
+ kNotCompiledLargeMethodNoBranches,
+ kNotCompiledCannotBuildSSA,
+ kNotCompiledNoCodegen,
+ kNotCompiledUnresolvedMethod,
+ kNotCompiledUnresolvedField,
+ kNotCompiledNonSequentialRegPair,
+ kNotCompiledVolatile,
+ kNotOptimizedTryCatch,
+ kNotOptimizedDisabled,
+ kNotCompiledCantAccesType,
+ kNotOptimizedRegisterAllocator,
+ kNotCompiledUnhandledInstruction,
+ kLastStat
+};
+
+class OptimizingCompilerStats {
+ public:
+ OptimizingCompilerStats() {}
+
+ void RecordStat(MethodCompilationStat stat) {
+ compile_stats_[stat]++;
+ }
+
+ void Log() const {
+ if (compile_stats_[kAttemptCompilation] == 0) {
+ LOG(INFO) << "Did not compile any method.";
+ } else {
+ size_t unoptimized_percent =
+ compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
+ size_t optimized_percent =
+ compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
+ std::ostringstream oss;
+ oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: "
+ << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, "
+ << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized.";
+ for (int i = 0; i < kLastStat; i++) {
+ if (compile_stats_[i] != 0) {
+ oss << "\n" << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
+ }
+ }
+ LOG(INFO) << oss.str();
+ }
+ }
+
+ private:
+ std::string PrintMethodCompilationStat(int stat) const {
+ switch (stat) {
+ case kAttemptCompilation : return "kAttemptCompilation";
+ case kCompiledBaseline : return "kCompiledBaseline";
+ case kCompiledOptimized : return "kCompiledOptimized";
+ case kInlinedInvoke : return "kInlinedInvoke";
+ case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
+ case kNotCompiledPathological : return "kNotCompiledPathological";
+ case kNotCompiledHugeMethod : return "kNotCompiledHugeMethod";
+ case kNotCompiledLargeMethodNoBranches : return "kNotCompiledLargeMethodNoBranches";
+ case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
+ case kNotCompiledNoCodegen : return "kNotCompiledNoCodegen";
+ case kNotCompiledUnresolvedMethod : return "kNotCompiledUnresolvedMethod";
+ case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
+ case kNotCompiledNonSequentialRegPair : return "kNotCompiledNonSequentialRegPair";
+ case kNotCompiledVolatile : return "kNotCompiledVolatile";
+ case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
+ case kNotOptimizedTryCatch : return "kNotOptimizedTryCatch";
+ case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
+ case kNotOptimizedRegisterAllocator : return "kNotOptimizedRegisterAllocator";
+ case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
+ default: LOG(FATAL) << "invalid stat";
+ }
+ return "";
+ }
+
+ AtomicInteger compile_stats_[kLastStat];
+
+ DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_STATS_H_
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a6c06359a0..c1c805dc56 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -64,15 +64,17 @@ bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph,
if (!Supports(instruction_set)) {
return false;
}
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ return true;
+ }
for (size_t i = 0, e = graph.GetBlocks().Size(); i < e; ++i) {
for (HInstructionIterator it(graph.GetBlocks().Get(i)->GetInstructions());
!it.Done();
it.Advance()) {
HInstruction* current = it.Current();
- if (current->GetType() == Primitive::kPrimLong && instruction_set != kX86_64) return false;
- if ((current->GetType() == Primitive::kPrimFloat
- || current->GetType() == Primitive::kPrimDouble)
- && instruction_set != kX86_64) {
+ if (current->GetType() == Primitive::kPrimLong ||
+ current->GetType() == Primitive::kPrimFloat ||
+ current->GetType() == Primitive::kPrimDouble) {
return false;
}
}
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 976ee39ca8..cbe741c2b3 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -67,10 +67,11 @@ class RegisterAllocator {
static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
static bool Supports(InstructionSet instruction_set) {
- return instruction_set == kX86
- || instruction_set == kArm
- || instruction_set == kX86_64
- || instruction_set == kThumb2;
+ return instruction_set == kArm
+ || instruction_set == kArm64
+ || instruction_set == kThumb2
+ || instruction_set == kX86
+ || instruction_set == kX86_64;
}
size_t GetNumberOfSpillSlots() const {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 8d75db91d2..f677e840ef 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -39,9 +39,7 @@ static bool Check(const uint16_t* data) {
HGraphBuilder builder(&allocator);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
x86::CodeGeneratorX86 codegen(graph);
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
@@ -253,9 +251,7 @@ static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
HGraphBuilder builder(allocator);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
- graph->BuildDominatorTree();
- graph->TransformToSSA();
- graph->AnalyzeNaturalLoops();
+ graph->TryBuildingSsa();
return graph;
}
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 6174dd49a1..6b6bf05053 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -87,7 +87,7 @@ static void TestCode(const uint16_t* data, const char* expected) {
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
- graph->TransformToSSA();
+ graph->TransformToSsa();
ReNumberInstructions(graph);
// Test that phis had their type set.
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 9cfa71c13f..3974e53e6f 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -111,7 +111,7 @@ class StackMapStream : public ValueObject {
}
size_t ComputeStackMapSize() const {
- return stack_maps_.Size() * (StackMap::kFixedSize + StackMaskEncodingSize(stack_mask_max_));
+ return stack_maps_.Size() * StackMap::ComputeAlignedStackMapSize(stack_mask_max_);
}
size_t ComputeDexRegisterMapSize() const {
diff --git a/compiler/sea_ir/code_gen/code_gen.cc b/compiler/sea_ir/code_gen/code_gen.cc
deleted file mode 100644
index 8d79c41545..0000000000
--- a/compiler/sea_ir/code_gen/code_gen.cc
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <llvm/Support/raw_ostream.h>
-
-#include "base/logging.h"
-#include "utils.h"
-
-#include "sea_ir/ir/sea.h"
-#include "sea_ir/code_gen/code_gen.h"
-#include "sea_ir/types/type_inference.h"
-#include "sea_ir/types/types.h"
-
-namespace sea_ir {
-
-void CodeGenPrepassVisitor::Visit(PhiInstructionNode* phi) {
- Region* r = phi->GetRegion();
- const std::vector<Region*>* predecessors = r->GetPredecessors();
- DCHECK(NULL != predecessors);
- DCHECK_GT(predecessors->size(), 0u);
- llvm::PHINode *llvm_phi = llvm_data_->builder_.CreatePHI(
- llvm::Type::getInt32Ty(*llvm_data_->context_), predecessors->size(), phi->StringId());
- llvm_data_->AddValue(phi, llvm_phi);
-}
-
-void CodeGenPassVisitor::Initialize(SeaGraph* graph) {
- Region* root_region;
- ordered_regions_.clear();
- for (std::vector<Region*>::const_iterator cit = graph->GetRegions()->begin();
- cit != graph->GetRegions()->end(); cit++ ) {
- if ((*cit)->GetIDominator() == (*cit)) {
- root_region = *cit;
- }
- }
- ordered_regions_.push_back(root_region);
- for (unsigned int id = 0; id < ordered_regions_.size(); id++) {
- Region* current_region = ordered_regions_.at(id);
- const std::set<Region*>* dominated_regions = current_region->GetIDominatedSet();
- for (std::set<Region*>::const_iterator cit = dominated_regions->begin();
- cit != dominated_regions->end(); cit++ ) {
- ordered_regions_.push_back(*cit);
- }
- }
-}
-
-void CodeGenPostpassVisitor::Visit(SeaGraph* graph) { }
-void CodeGenVisitor::Visit(SeaGraph* graph) { }
-void CodeGenPrepassVisitor::Visit(SeaGraph* graph) {
- std::vector<SignatureNode*>* parameters = graph->GetParameterNodes();
- // TODO: It may be better to extract correct types from dex
- // instead than from type inference.
- DCHECK(parameters != NULL);
- std::vector<llvm::Type*> parameter_types;
- for (std::vector<SignatureNode*>::const_iterator param_iterator = parameters->begin();
- param_iterator!= parameters->end(); param_iterator++) {
- const Type* param_type = graph->ti_->type_data_.FindTypeOf((*param_iterator)->Id());
- DCHECK(param_type->Equals(graph->ti_->type_cache_->Integer()))
- << "Code generation for types other than integer not implemented.";
- parameter_types.push_back(llvm::Type::getInt32Ty(*llvm_data_->context_));
- }
-
- // TODO: Get correct function return type.
- const Type* return_type = graph->ti_->type_data_.FindTypeOf(-1);
- DCHECK(return_type->Equals(graph->ti_->type_cache_->Integer()))
- << "Code generation for types other than integer not implemented.";
- llvm::FunctionType *function_type = llvm::FunctionType::get(
- llvm::Type::getInt32Ty(*llvm_data_->context_),
- parameter_types, false);
-
- llvm_data_->function_ = llvm::Function::Create(function_type,
- llvm::Function::ExternalLinkage, function_name_, &llvm_data_->module_);
- unsigned param_id = 0;
- for (llvm::Function::arg_iterator arg_it = llvm_data_->function_->arg_begin();
- param_id != llvm_data_->function_->arg_size(); ++arg_it, ++param_id) {
- // TODO: The "+1" is because of the Method parameter on position 0.
- DCHECK(parameters->size() > param_id) << "Insufficient parameters for function signature";
- // Build parameter register name for LLVM IR clarity.
- std::string arg_name = art::StringPrintf("r%d", parameters->at(param_id)->GetResultRegister());
- arg_it->setName(arg_name);
- SignatureNode* parameter = parameters->at(param_id);
- llvm_data_->AddValue(parameter, arg_it);
- }
-
- std::vector<Region*>* regions = &ordered_regions_;
- DCHECK_GT(regions->size(), 0u);
- // Then create all other basic blocks.
- for (std::vector<Region*>::const_iterator cit = regions->begin(); cit != regions->end(); cit++) {
- llvm::BasicBlock* new_basic_block = llvm::BasicBlock::Create(*llvm_data_->context_,
- (*cit)->StringId(), llvm_data_->function_);
- llvm_data_->AddBlock((*cit), new_basic_block);
- }
-}
-
-void CodeGenPrepassVisitor::Visit(Region* region) {
- llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
-}
-void CodeGenPostpassVisitor::Visit(Region* region) {
- llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
-}
-void CodeGenVisitor::Visit(Region* region) {
- llvm_data_->builder_.SetInsertPoint(llvm_data_->GetBlock(region));
-}
-
-
-void CodeGenVisitor::Visit(InstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- DCHECK(0); // This whole function is useful only during development.
-}
-
-void CodeGenVisitor::Visit(UnnamedConstInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "1.Instruction: " << instr << std::endl;
- llvm_data_->AddValue(instruction,
- llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, instruction->GetConstValue())));
-}
-
-void CodeGenVisitor::Visit(ConstInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "1.Instruction: " << instr << std::endl;
- llvm_data_->AddValue(instruction,
- llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, instruction->GetConstValue())));
-}
-void CodeGenVisitor::Visit(ReturnInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "2.Instruction: " << instr << std::endl;
- DCHECK_GT(instruction->GetSSAProducers().size(), 0u);
- llvm::Value* return_value = llvm_data_->GetValue(instruction->GetSSAProducers().at(0));
- llvm_data_->builder_.CreateRet(return_value);
-}
-void CodeGenVisitor::Visit(IfNeInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "3.Instruction: " << instr << std::endl;
- std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
- DCHECK_GT(ssa_uses.size(), 1u);
- InstructionNode* use_l = ssa_uses.at(0);
- llvm::Value* left = llvm_data_->GetValue(use_l);
-
- InstructionNode* use_r = ssa_uses.at(1);
- llvm::Value* right = llvm_data_->GetValue(use_r);
- llvm::Value* ifne = llvm_data_->builder_.CreateICmpNE(left, right, instruction->StringId());
- DCHECK(instruction->GetRegion() != NULL);
- std::vector<Region*>* successors = instruction->GetRegion()->GetSuccessors();
- DCHECK_GT(successors->size(), 0u);
- llvm::BasicBlock* then_block = llvm_data_->GetBlock(successors->at(0));
- llvm::BasicBlock* else_block = llvm_data_->GetBlock(successors->at(1));
-
- llvm_data_->builder_.CreateCondBr(ifne, then_block, else_block);
-}
-
-/*
-void CodeGenVisitor::Visit(AddIntLitInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "4.Instruction: " << instr << std::endl;
- std::vector<InstructionNode*> ssa_uses = instruction->GetSSAUses();
- InstructionNode* use_l = ssa_uses.at(0);
- llvm::Value* left = llvm_data->GetValue(use_l);
- llvm::Value* right = llvm::ConstantInt::get(*llvm_data->context_,
- llvm::APInt(32, instruction->GetConstValue()));
- llvm::Value* result = llvm_data->builder_.CreateAdd(left, right);
- llvm_data->AddValue(instruction, result);
-}
-*/
-void CodeGenVisitor::Visit(MoveResultInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "5.Instruction: " << instr << std::endl;
- // TODO: Currently, this "mov" instruction is simulated by "res = return_register + 0".
- // This is inefficient, but should be optimized out by the coalescing phase of the reg alloc.
- // The TODO is to either ensure that this happens, or to
- // remove the move-result instructions completely from the IR
- // by merging them with the invoke-* instructions,
- // since their purpose of minimizing the number of opcodes in dex is
- // not relevant for the IR. (Will need to have different
- // instruction subclasses for functions and procedures.)
- std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
- InstructionNode* use_l = ssa_uses.at(0);
- llvm::Value* left = llvm_data_->GetValue(use_l);
- llvm::Value* right = llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, 0));
- llvm::Value* result = llvm_data_->builder_.CreateAdd(left, right);
- llvm_data_->AddValue(instruction, result);
-}
-void CodeGenVisitor::Visit(InvokeStaticInstructionNode* invoke) {
- std::string instr = invoke->GetInstruction()->DumpString(NULL);
- std::cout << "6.Instruction: " << instr << std::endl;
- // TODO: Build callee LLVM function name.
- std::string symbol = "dex_";
- symbol += art::MangleForJni(PrettyMethod(invoke->GetCalledMethodIndex(), dex_file_));
- std::string function_name = "dex_int_00020Main_fibonacci_00028int_00029";
- llvm::Function *callee = llvm_data_->module_.getFunction(function_name);
- // TODO: Add proper checking of the matching between formal and actual signature.
- DCHECK(NULL != callee);
- std::vector<llvm::Value*> parameter_values;
- std::vector<InstructionNode*> parameter_sources = invoke->GetSSAProducers();
- // TODO: Replace first parameter with Method argument instead of 0.
- parameter_values.push_back(llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, 0)));
- for (std::vector<InstructionNode*>::const_iterator cit = parameter_sources.begin();
- cit != parameter_sources.end(); ++cit) {
- llvm::Value* parameter_value = llvm_data_->GetValue((*cit));
- DCHECK(NULL != parameter_value);
- parameter_values.push_back(parameter_value);
- }
- llvm::Value* return_value = llvm_data_->builder_.CreateCall(callee,
- parameter_values, invoke->StringId());
- llvm_data_->AddValue(invoke, return_value);
-}
-void CodeGenVisitor::Visit(AddIntInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "7.Instruction: " << instr << std::endl;
- std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
- DCHECK_GT(ssa_uses.size(), 1u);
- InstructionNode* use_l = ssa_uses.at(0);
- InstructionNode* use_r = ssa_uses.at(1);
- llvm::Value* left = llvm_data_->GetValue(use_l);
- llvm::Value* right = llvm_data_->GetValue(use_r);
- llvm::Value* result = llvm_data_->builder_.CreateAdd(left, right);
- llvm_data_->AddValue(instruction, result);
-}
-void CodeGenVisitor::Visit(GotoInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "8.Instruction: " << instr << std::endl;
- std::vector<sea_ir::Region*>* targets = instruction->GetRegion()->GetSuccessors();
- DCHECK_EQ(targets->size(), 1u);
- llvm::BasicBlock* target_block = llvm_data_->GetBlock(targets->at(0));
- llvm_data_->builder_.CreateBr(target_block);
-}
-void CodeGenVisitor::Visit(IfEqzInstructionNode* instruction) {
- std::string instr = instruction->GetInstruction()->DumpString(NULL);
- std::cout << "9. Instruction: " << instr << "; Id: " <<instruction << std::endl;
- std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
- DCHECK_GT(ssa_uses.size(), 0u);
- InstructionNode* use_l = ssa_uses.at(0);
- llvm::Value* left = llvm_data_->GetValue(use_l);
- llvm::Value* ifeqz = llvm_data_->builder_.CreateICmpEQ(left,
- llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt::getNullValue(32)),
- instruction->StringId());
- DCHECK(instruction->GetRegion() != NULL);
- std::vector<Region*>* successors = instruction->GetRegion()->GetSuccessors();
- DCHECK_GT(successors->size(), 0u);
- llvm::BasicBlock* then_block = llvm_data_->GetBlock(successors->at(0));
- llvm::BasicBlock* else_block = llvm_data_->GetBlock(successors->at(1));
- llvm_data_->builder_.CreateCondBr(ifeqz, then_block, else_block);
-}
-
-void CodeGenPostpassVisitor::Visit(PhiInstructionNode* phi) {
- std::cout << "10. Instruction: Phi(" << phi->GetRegisterNumber() << ")" << std::endl;
- Region* r = phi->GetRegion();
- const std::vector<Region*>* predecessors = r->GetPredecessors();
- DCHECK(NULL != predecessors);
- DCHECK_GT(predecessors->size(), 0u);
- // Prepass (CodeGenPrepassVisitor) should create the phi function value.
- llvm::PHINode* llvm_phi = (llvm::PHINode*) llvm_data_->GetValue(phi);
- int predecessor_pos = 0;
- for (std::vector<Region*>::const_iterator cit = predecessors->begin();
- cit != predecessors->end(); ++cit) {
- std::vector<InstructionNode*>* defining_instructions = phi->GetSSAUses(predecessor_pos++);
- DCHECK_EQ(defining_instructions->size(), 1u);
- InstructionNode* defining_instruction = defining_instructions->at(0);
- DCHECK(NULL != defining_instruction);
- Region* incoming_region = *cit;
- llvm::BasicBlock* incoming_basic_block = llvm_data_->GetBlock(incoming_region);
- llvm::Value* incoming_value = llvm_data_->GetValue(defining_instruction);
- llvm_phi->addIncoming(incoming_value, incoming_basic_block);
- }
-}
-
-void CodeGenVisitor::Visit(SignatureNode* signature) {
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
- "Signature nodes must correspond to a single parameter register.";
-}
-void CodeGenPrepassVisitor::Visit(SignatureNode* signature) {
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
- "Signature nodes must correspond to a single parameter register.";
-}
-void CodeGenPostpassVisitor::Visit(SignatureNode* signature) {
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
- "Signature nodes must correspond to a single parameter register.";
-}
-
-} // namespace sea_ir
diff --git a/compiler/sea_ir/code_gen/code_gen.h b/compiler/sea_ir/code_gen/code_gen.h
deleted file mode 100644
index 544e9f0650..0000000000
--- a/compiler/sea_ir/code_gen/code_gen.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
-#define ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
-
-#include "instruction_set.h"
-#include "llvm/Analysis/Verifier.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Analysis/Verifier.h"
-#include "sea_ir/ir/visitor.h"
-
-namespace sea_ir {
-// Abstracts away the containers we use to map SEA IR objects to LLVM IR objects.
-class CodeGenData {
- public:
- explicit CodeGenData(): context_(&llvm::getGlobalContext()), module_("sea_ir", *context_),
- builder_(*context_), function_(), blocks_(), values_() { }
- // Returns the llvm::BasicBlock* corresponding to the sea_ir::Region with id @region_id.
- llvm::BasicBlock* GetBlock(int region_id) {
- std::map<int, llvm::BasicBlock*>::iterator block_it = blocks_.find(region_id);
- DCHECK(block_it != blocks_.end());
- return block_it->second;
- }
- // Returns the llvm::BasicBlock* corresponding top the sea_ir::Region @region.
- llvm::BasicBlock* GetBlock(Region* region) {
- return GetBlock(region->Id());
- }
- // Records @block as corresponding to the sea_ir::Region with id @region_id.
- void AddBlock(int region_id, llvm::BasicBlock* block) {
- blocks_.insert(std::pair<int, llvm::BasicBlock*>(region_id, block));
- }
- // Records @block as corresponding to the sea_ir::Region with @region.
- void AddBlock(Region* region, llvm::BasicBlock* block) {
- AddBlock(region->Id(), block);
- }
-
- llvm::Value* GetValue(int instruction_id) {
- std::map<int, llvm::Value*>::iterator value_it = values_.find(instruction_id);
- DCHECK(value_it != values_.end());
- return value_it->second;
- }
- // Returns the llvm::Value* corresponding to the output of @instruction.
- llvm::Value* GetValue(InstructionNode* instruction) {
- return GetValue(instruction->Id());
- }
- // Records @value as corresponding to the sea_ir::InstructionNode with id @instruction_id.
- void AddValue(int instruction_id, llvm::Value* value) {
- values_.insert(std::pair<int, llvm::Value*>(instruction_id, value));
- }
- // Records @value as corresponding to the sea_ir::InstructionNode @instruction.
- void AddValue(InstructionNode* instruction, llvm::Value* value) {
- AddValue(instruction->Id(), value);
- }
- // Generates and returns in @elf the executable code corresponding to the llvm module
- //
- std::string GetElf(art::InstructionSet instruction_set);
-
- llvm::LLVMContext* const context_;
- llvm::Module module_;
- llvm::IRBuilder<> builder_;
- llvm::Function* function_;
-
- private:
- std::map<int, llvm::BasicBlock*> blocks_;
- std::map<int, llvm::Value*> values_;
-};
-
-class CodeGenPassVisitor: public IRVisitor {
- public:
- explicit CodeGenPassVisitor(CodeGenData* cgd): llvm_data_(cgd) { }
- CodeGenPassVisitor(): llvm_data_(new CodeGenData()) { }
- // Initialize any data structure needed before the start of visiting.
- virtual void Initialize(SeaGraph* graph);
- CodeGenData* GetData() {
- return llvm_data_;
- }
- void Write(std::string file) {
- llvm_data_->module_.dump();
- llvm::verifyFunction(*llvm_data_->function_);
- }
-
- protected:
- CodeGenData* const llvm_data_;
-};
-
-class CodeGenPrepassVisitor: public CodeGenPassVisitor {
- public:
- explicit CodeGenPrepassVisitor(const std::string& function_name):
- function_name_(function_name) { }
- void Visit(SeaGraph* graph);
- void Visit(SignatureNode* region);
- void Visit(Region* region);
- void Visit(InstructionNode* instruction) { }
-
- void Visit(UnnamedConstInstructionNode* instruction) { }
- void Visit(ConstInstructionNode* instruction) { }
- void Visit(ReturnInstructionNode* instruction) { }
- void Visit(IfNeInstructionNode* instruction) { }
- // void Visit(AddIntLitInstructionNode* instruction) { }
- void Visit(MoveResultInstructionNode* instruction) { }
- void Visit(InvokeStaticInstructionNode* instruction) { }
- void Visit(AddIntInstructionNode* instruction) { }
- void Visit(GotoInstructionNode* instruction) { }
- void Visit(IfEqzInstructionNode* instruction) { }
- void Visit(PhiInstructionNode* region);
-
- private:
- std::string function_name_;
-};
-
-class CodeGenPostpassVisitor: public CodeGenPassVisitor {
- public:
- explicit CodeGenPostpassVisitor(CodeGenData* code_gen_data): CodeGenPassVisitor(code_gen_data) { }
- void Visit(SeaGraph* graph);
- void Visit(SignatureNode* region);
- void Visit(Region* region);
- void Visit(InstructionNode* region) { }
- void Visit(UnnamedConstInstructionNode* instruction) { }
- void Visit(ConstInstructionNode* instruction) { }
- void Visit(ReturnInstructionNode* instruction) { }
- void Visit(IfNeInstructionNode* instruction) { }
- // void Visit(AddIntLitInstructionNode* instruction) { }
- void Visit(MoveResultInstructionNode* instruction) { }
- void Visit(InvokeStaticInstructionNode* instruction) { }
- void Visit(AddIntInstructionNode* instruction) { }
- void Visit(GotoInstructionNode* instruction) { }
- void Visit(IfEqzInstructionNode* instruction) { }
- void Visit(PhiInstructionNode* region);
-};
-
-class CodeGenVisitor: public CodeGenPassVisitor {
- public:
- explicit CodeGenVisitor(CodeGenData* code_gen_data,
- const art::DexFile& dex_file): CodeGenPassVisitor(code_gen_data), dex_file_(dex_file) { }
- void Visit(SeaGraph* graph);
- void Visit(SignatureNode* region);
- void Visit(Region* region);
- void Visit(InstructionNode* region);
- void Visit(UnnamedConstInstructionNode* instruction);
- void Visit(ConstInstructionNode* instruction);
- void Visit(ReturnInstructionNode* instruction);
- void Visit(IfNeInstructionNode* instruction);
- void Visit(MoveResultInstructionNode* instruction);
- void Visit(InvokeStaticInstructionNode* instruction);
- void Visit(AddIntInstructionNode* instruction);
- void Visit(GotoInstructionNode* instruction);
- void Visit(IfEqzInstructionNode* instruction);
- void Visit(PhiInstructionNode* region) { }
-
- private:
- std::string function_name_;
- const art::DexFile& dex_file_;
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
diff --git a/compiler/sea_ir/code_gen/code_gen_data.cc b/compiler/sea_ir/code_gen/code_gen_data.cc
deleted file mode 100644
index 17f64db70c..0000000000
--- a/compiler/sea_ir/code_gen/code_gen_data.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string>
-#include <llvm/PassManager.h>
-#include <llvm/Support/TargetRegistry.h>
-#include <llvm/Support/FormattedStream.h>
-#include <llvm/Target/TargetMachine.h>
-#include <llvm/Transforms/IPO.h>
-#include <llvm/Transforms/IPO/PassManagerBuilder.h>
-
-#include "base/logging.h"
-#include "driver/compiler_driver.h"
-#include "sea_ir/ir/sea.h"
-#include "sea_ir/code_gen/code_gen.h"
-
-
-namespace sea_ir {
-std::string CodeGenData::GetElf(art::InstructionSet instruction_set) {
- std::string elf;
- ::llvm::raw_string_ostream out_stream(elf);
- // Lookup the LLVM target
- std::string target_triple;
- std::string target_cpu;
- std::string target_attr;
- art::CompilerDriver::InstructionSetToLLVMTarget(instruction_set,
- target_triple, target_cpu, target_attr);
-
- std::string errmsg;
- const ::llvm::Target* target =
- ::llvm::TargetRegistry::lookupTarget(target_triple, errmsg);
-
- CHECK(target != NULL) << errmsg;
-
- // Target options
- ::llvm::TargetOptions target_options;
- target_options.FloatABIType = ::llvm::FloatABI::Soft;
- target_options.NoFramePointerElim = true;
- target_options.NoFramePointerElimNonLeaf = true;
- target_options.UseSoftFloat = false;
- target_options.EnableFastISel = false;
-
- // Create the ::llvm::TargetMachine
- ::llvm::OwningPtr< ::llvm::TargetMachine> target_machine(
- target->createTargetMachine(target_triple, target_cpu, target_attr, target_options,
- ::llvm::Reloc::Static, ::llvm::CodeModel::Small,
- ::llvm::CodeGenOpt::Aggressive));
-
- CHECK(target_machine.get() != NULL) << "Failed to create target machine";
-
- // Add target data
- const ::llvm::DataLayout* data_layout = target_machine->getDataLayout();
-
- // PassManager for code generation passes
- ::llvm::PassManager pm;
- pm.add(new ::llvm::DataLayout(*data_layout));
-
- // FunctionPassManager for optimization pass
- ::llvm::FunctionPassManager fpm(&module_);
- fpm.add(new ::llvm::DataLayout(*data_layout));
-
- // Add optimization pass
- ::llvm::PassManagerBuilder pm_builder;
- // TODO: Use inliner after we can do IPO.
- pm_builder.Inliner = NULL;
- // pm_builder.Inliner = ::llvm::createFunctionInliningPass();
- // pm_builder.Inliner = ::llvm::createAlwaysInlinerPass();
- // pm_builder.Inliner = ::llvm::createPartialInliningPass();
- pm_builder.OptLevel = 3;
- pm_builder.DisableSimplifyLibCalls = 1;
- pm_builder.DisableUnitAtATime = 1;
- pm_builder.populateFunctionPassManager(fpm);
- pm_builder.populateModulePassManager(pm);
- pm.add(::llvm::createStripDeadPrototypesPass());
- // Add passes to emit ELF image
- {
- ::llvm::formatted_raw_ostream formatted_os(out_stream, false);
- // Ask the target to add backend passes as necessary.
- if (target_machine->addPassesToEmitFile(pm,
- formatted_os,
- ::llvm::TargetMachine::CGFT_ObjectFile,
- true)) {
- LOG(FATAL) << "Unable to generate ELF for this target";
- }
-
- // Run the code generation passes
- pm.run(module_);
- }
- return elf;
-}
-} // namespace sea_ir
diff --git a/compiler/sea_ir/debug/dot_gen.cc b/compiler/sea_ir/debug/dot_gen.cc
deleted file mode 100644
index 9442684a52..0000000000
--- a/compiler/sea_ir/debug/dot_gen.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include "scoped_thread_state_change.h"
-#include "sea_ir/debug/dot_gen.h"
-
-namespace sea_ir {
-
-void DotGenerationVisitor::Initialize(SeaGraph* graph) {
- graph_ = graph;
- Region* root_region;
- ordered_regions_.clear();
- for (std::vector<Region*>::const_iterator cit = graph->GetRegions()->begin();
- cit != graph->GetRegions()->end(); cit++ ) {
- if ((*cit)->GetIDominator() == (*cit)) {
- root_region = *cit;
- }
- }
- ordered_regions_.push_back(root_region);
- for (unsigned int id = 0; id < ordered_regions_.size(); id++) {
- Region* current_region = ordered_regions_.at(id);
- const std::set<Region*>* dominated_regions = current_region->GetIDominatedSet();
- for (std::set<Region*>::const_iterator cit = dominated_regions->begin();
- cit != dominated_regions->end(); cit++ ) {
- ordered_regions_.push_back(*cit);
- }
- }
-}
-
-void DotGenerationVisitor::ToDotSSAEdges(InstructionNode* instruction) {
- std::map<int, InstructionNode*>* definition_edges = instruction->GetSSAProducersMap();
- // SSA definitions:
- for (std::map<int, InstructionNode*>::const_iterator
- def_it = definition_edges->begin();
- def_it != definition_edges->end(); def_it++) {
- if (NULL != def_it->second) {
- dot_text_ += def_it->second->StringId() + " -> ";
- dot_text_ += instruction->StringId() + "[color=gray,label=\"";
- dot_text_ += art::StringPrintf("vR = %d", def_it->first);
- art::SafeMap<int, const Type*>::const_iterator type_it = types_->find(def_it->second->Id());
- if (type_it != types_->end()) {
- art::ScopedObjectAccess soa(art::Thread::Current());
- dot_text_ += "(" + type_it->second->Dump() + ")";
- } else {
- dot_text_ += "()";
- }
- dot_text_ += "\"] ; // SSA edge\n";
- }
- }
-
- // SSA used-by:
- if (options_->WillSaveUseEdges()) {
- std::vector<InstructionNode*>* used_in = instruction->GetSSAConsumers();
- for (std::vector<InstructionNode*>::const_iterator cit = used_in->begin();
- cit != used_in->end(); cit++) {
- dot_text_ += (*cit)->StringId() + " -> " + instruction->StringId() + "[color=gray,label=\"";
- dot_text_ += "\"] ; // SSA used-by edge\n";
- }
- }
-}
-
-void DotGenerationVisitor::ToDotSSAEdges(PhiInstructionNode* instruction) {
- std::vector<InstructionNode*> definition_edges = instruction->GetSSAProducers();
- // SSA definitions:
- for (std::vector<InstructionNode*>::const_iterator
- def_it = definition_edges.begin();
- def_it != definition_edges.end(); def_it++) {
- if (NULL != *def_it) {
- dot_text_ += (*def_it)->StringId() + " -> ";
- dot_text_ += instruction->StringId() + "[color=gray,label=\"";
- dot_text_ += art::StringPrintf("vR = %d", instruction->GetRegisterNumber());
- art::SafeMap<int, const Type*>::const_iterator type_it = types_->find((*def_it)->Id());
- if (type_it != types_->end()) {
- art::ScopedObjectAccess soa(art::Thread::Current());
- dot_text_ += "(" + type_it->second->Dump() + ")";
- } else {
- dot_text_ += "()";
- }
- dot_text_ += "\"] ; // SSA edge\n";
- }
- }
-
- // SSA used-by:
- if (options_->WillSaveUseEdges()) {
- std::vector<InstructionNode*>* used_in = instruction->GetSSAConsumers();
- for (std::vector<InstructionNode*>::const_iterator cit = used_in->begin();
- cit != used_in->end(); cit++) {
- dot_text_ += (*cit)->StringId() + " -> " + instruction->StringId() + "[color=gray,label=\"";
- dot_text_ += "\"] ; // SSA used-by edge\n";
- }
- }
-}
-
-void DotGenerationVisitor::Visit(SignatureNode* parameter) {
- dot_text_ += parameter->StringId() +" [label=\"[" + parameter->StringId() + "] signature:";
- dot_text_ += art::StringPrintf("r%d", parameter->GetResultRegister());
- dot_text_ += "\"] // signature node\n";
- ToDotSSAEdges(parameter);
-}
-
-// Appends to @result a dot language formatted string representing the node and
-// (by convention) outgoing edges, so that the composition of theToDot() of all nodes
-// builds a complete dot graph (without prolog and epilog though).
-void DotGenerationVisitor::Visit(Region* region) {
- dot_text_ += "\n// Region: \nsubgraph " + region->StringId();
- dot_text_ += " { label=\"region " + region->StringId() + "(rpo=";
- dot_text_ += art::StringPrintf("%d", region->GetRPO());
- if (NULL != region->GetIDominator()) {
- dot_text_ += " dom=" + region->GetIDominator()->StringId();
- }
- dot_text_ += ")\";\n";
-
- std::vector<PhiInstructionNode*>* phi_instructions = region->GetPhiNodes();
- for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions->begin();
- cit != phi_instructions->end(); cit++) {
- dot_text_ += (*cit)->StringId() +";\n";
- }
- std::vector<InstructionNode*>* instructions = region->GetInstructions();
- for (std::vector<InstructionNode*>::const_iterator cit = instructions->begin();
- cit != instructions->end(); cit++) {
- dot_text_ += (*cit)->StringId() +";\n";
- }
-
- dot_text_ += "} // End Region.\n";
- std::vector<Region*>* successors = region->GetSuccessors();
- for (std::vector<Region*>::const_iterator cit = successors->begin(); cit != successors->end();
- cit++) {
- DCHECK(NULL != *cit) << "Null successor found for SeaNode" <<
- region->GetLastChild()->StringId() << ".";
- dot_text_ += region->GetLastChild()->StringId() + " -> " +
- (*cit)->GetLastChild()->StringId() +
- "[lhead=" + (*cit)->StringId() + ", " + "ltail=" + region->StringId() + "];\n\n";
- }
-}
-void DotGenerationVisitor::Visit(InstructionNode* instruction) {
- dot_text_ += "// Instruction ("+instruction->StringId()+"): \n" + instruction->StringId() +
- " [label=\"[" + instruction->StringId() + "] " +
- instruction->GetInstruction()->DumpString(graph_->GetDexFile()) + "\"";
- dot_text_ += "];\n";
- ToDotSSAEdges(instruction);
-}
-
-void DotGenerationVisitor::Visit(UnnamedConstInstructionNode* instruction) {
- dot_text_ += "// Instruction ("+instruction->StringId()+"): \n" + instruction->StringId() +
- " [label=\"[" + instruction->StringId() + "] const/x v-3, #" +
- art::StringPrintf("%d", instruction->GetConstValue()) + "\"";
- dot_text_ += "];\n";
- ToDotSSAEdges(instruction);
-}
-
-void DotGenerationVisitor::Visit(PhiInstructionNode* phi) {
- dot_text_ += "// PhiInstruction: \n" + phi->StringId() +
- " [label=\"[" + phi->StringId() + "] PHI(";
- dot_text_ += art::StringPrintf("%d", phi->GetRegisterNumber());
- dot_text_ += ")\"";
- dot_text_ += "];\n";
- ToDotSSAEdges(phi);
-}
-} // namespace sea_ir
diff --git a/compiler/sea_ir/debug/dot_gen.h b/compiler/sea_ir/debug/dot_gen.h
deleted file mode 100644
index a5d681919d..0000000000
--- a/compiler/sea_ir/debug/dot_gen.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
-#define ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
-
-#include "safe_map.h"
-#include "base/stringprintf.h"
-#include "file_output_stream.h"
-#include "os.h"
-#include "sea_ir/ir/sea.h"
-#include "sea_ir/types/type_inference.h"
-
-namespace sea_ir {
-
-class DotConversionOptions {
- public:
- DotConversionOptions(): save_use_edges_(false) { }
- bool WillSaveUseEdges() const {
- return save_use_edges_;
- }
- private:
- bool save_use_edges_;
-};
-
-class DotGenerationVisitor: public IRVisitor {
- public:
- explicit DotGenerationVisitor(const DotConversionOptions* const options,
- art::SafeMap<int, const Type*>* types): graph_(), types_(types), options_(options) { }
-
- virtual void Initialize(SeaGraph* graph);
- // Saves the ssa def->use edges corresponding to @instruction.
- void ToDotSSAEdges(InstructionNode* instruction);
- void ToDotSSAEdges(PhiInstructionNode* instruction);
- void Visit(SeaGraph* graph) {
- dot_text_ += "digraph seaOfNodes {\ncompound=true\n";
- }
- void Visit(SignatureNode* parameter);
-
- // Appends to @result a dot language formatted string representing the node and
- // (by convention) outgoing edges, so that the composition of theToDot() of all nodes
- // builds a complete dot graph (without prolog and epilog though).
- void Visit(Region* region);
- void Visit(InstructionNode* instruction);
- void Visit(PhiInstructionNode* phi);
- void Visit(UnnamedConstInstructionNode* instruction);
-
- void Visit(ConstInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(ReturnInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(IfNeInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(MoveResultInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(InvokeStaticInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(AddIntInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(GotoInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
- void Visit(IfEqzInstructionNode* instruction) {
- Visit(reinterpret_cast<InstructionNode*>(instruction));
- }
-
- std::string GetResult() const {
- return dot_text_;
- }
-
- private:
- std::string dot_text_;
- SeaGraph* graph_;
- art::SafeMap<int, const Type*>* types_;
- const DotConversionOptions* const options_;
-};
-
-// Stores options for turning a SEA IR graph to a .dot file.
-class DotConversion {
- public:
- DotConversion(): options_() { }
- // Saves to @filename the .dot representation of @graph with the options @options.
- void DumpSea(SeaGraph* graph, std::string filename,
- art::SafeMap<int, const Type*>* types) const {
- LOG(INFO) << "Starting to write SEA string to file " << filename << std::endl;
- DotGenerationVisitor dgv = DotGenerationVisitor(&options_, types);
- graph->Accept(&dgv);
- // TODO: std::unique_ptr to close file properly. Switch to BufferedOutputStream.
- art::File* file = art::OS::CreateEmptyFile(filename.c_str());
- art::FileOutputStream fos(file);
- std::string graph_as_string = dgv.GetResult();
- graph_as_string += "}";
- fos.WriteFully(graph_as_string.c_str(), graph_as_string.size());
- LOG(INFO) << "Written SEA string to file.";
- }
-
- private:
- DotConversionOptions options_;
-};
-
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc
deleted file mode 100644
index b57007bbb6..0000000000
--- a/compiler/sea_ir/frontend.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifdef ART_SEA_IR_MODE
-#include <llvm/Support/Threading.h>
-#include <llvm/Support/raw_ostream.h>
-#include <llvm/Bitcode/ReaderWriter.h>
-
-#include "base/logging.h"
-#include "llvm/llvm_compilation_unit.h"
-#include "dex/portable/mir_to_gbc.h"
-#include "driver/compiler_driver.h"
-#include "verifier/method_verifier.h"
-#include "mirror/object.h"
-#include "utils.h"
-
-#include "runtime.h"
-#include "safe_map.h"
-
-#include "sea_ir/ir/sea.h"
-#include "sea_ir/debug/dot_gen.h"
-#include "sea_ir/types/types.h"
-#include "sea_ir/code_gen/code_gen.h"
-
-namespace art {
-
-static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler,
- CompilerBackend* compiler_backend,
- const DexFile::CodeItem* code_item,
- uint32_t method_access_flags, InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx,
- jobject class_loader, const DexFile& dex_file,
- void* llvm_compilation_unit) {
- LOG(INFO) << "Compiling " << PrettyMethod(method_idx, dex_file) << ".";
- sea_ir::SeaGraph* ir_graph = sea_ir::SeaGraph::GetGraph(dex_file);
- std::string symbol = "dex_" + MangleForJni(PrettyMethod(method_idx, dex_file));
- sea_ir::CodeGenData* llvm_data = ir_graph->CompileMethod(symbol,
- code_item, class_def_idx, method_idx, method_access_flags, dex_file);
- sea_ir::DotConversion dc;
- SafeMap<int, const sea_ir::Type*>* types = ir_graph->ti_->GetTypeMap();
- dc.DumpSea(ir_graph, "/tmp/temp.dot", types);
- MethodReference mref(&dex_file, method_idx);
- std::string llvm_code = llvm_data->GetElf(compiler.GetInstructionSet());
- CompiledMethod* compiled_method =
- new CompiledMethod(compiler, compiler.GetInstructionSet(), llvm_code,
- *compiler.GetVerifiedMethodsData()->GetDexGcMap(mref), symbol);
- LOG(INFO) << "Compiled SEA IR method " << PrettyMethod(method_idx, dex_file) << ".";
- return compiled_method;
-}
-
-CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler,
- CompilerBackend* backend,
- const DexFile::CodeItem* code_item,
- uint32_t method_access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file,
- void* llvm_compilation_unit) {
- return CompileMethodWithSeaIr(compiler, backend, code_item, method_access_flags, invoke_type,
- class_def_idx, method_idx, class_loader, dex_file, llvm_compilation_unit);
-}
-
-extern "C" art::CompiledMethod*
- SeaIrCompileMethod(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t method_access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file) {
- // TODO: Check method fingerprint here to determine appropriate backend type.
- // Until then, use build default
- art::CompilerBackend* backend = compiler.GetCompilerBackend();
- return art::SeaIrCompileOneMethod(compiler, backend, code_item, method_access_flags, invoke_type,
- class_def_idx, method_idx, class_loader, dex_file,
- NULL /* use thread llvm_info */);
-}
-#endif
-
-} // namespace art
diff --git a/compiler/sea_ir/ir/instruction_nodes.h b/compiler/sea_ir/ir/instruction_nodes.h
deleted file mode 100644
index 63e89e7763..0000000000
--- a/compiler/sea_ir/ir/instruction_nodes.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
-#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
-#include "dex_instruction-inl.h"
-#include "sea_ir/ir/sea_node.h"
-#include "sea_ir/ir/visitor.h"
-
-
-namespace sea_ir {
-
-enum SpecialRegisters {
- NO_REGISTER = -1, // Usually signifies that there is no register
- // that respects the condition you asked for.
- RETURN_REGISTER = -2, // Written by the invoke* instructions, read by move-results.
- UNNAMED_CONST_REGISTER = -3 // Written by UnnamedConst* instructions, read by *Lit* instruction.
-};
-
-class IRVisitor;
-
-// This class represents an instruction in SEA IR.
-// As we add support for specific classes of instructions,
-// the number of InstructionNode objects should dwindle, while the
-// number of subclasses and instances of subclasses will go up.
-class InstructionNode: public SeaNode {
- public:
- static std::vector<sea_ir::InstructionNode*> Create(const art::Instruction* in);
- // Returns the Dalvik instruction around which this InstructionNode is wrapped.
- const art::Instruction* GetInstruction() const {
- DCHECK(NULL != instruction_) << "Tried to access NULL instruction in an InstructionNode.";
- return instruction_;
- }
- // Returns the register that is defined by the current instruction, or NO_REGISTER otherwise.
- virtual int GetResultRegister() const;
- // Returns the set of registers defined by the current instruction.
- virtual std::vector<int> GetDefinitions() const;
- // Returns the set of register numbers that are used by the instruction.
- virtual std::vector<int> GetUses() const;
- // Mark the current instruction as a downward exposed definition.
- void MarkAsDEDef();
- // Rename the use of @reg_no to refer to the instruction @definition,
- // essentially creating SSA form.
- void RenameToSSA(int reg_no, InstructionNode* definition) {
- definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
- DCHECK(NULL != definition) << "SSA definition for register " << reg_no
- << " used in instruction " << Id() << " not found.";
- definition->AddSSAUse(this);
- }
- // Returns the ordered set of Instructions that define the input operands of this instruction.
- // Precondition: SeaGraph.ConvertToSSA().
- virtual std::vector<InstructionNode*> GetSSAProducers() {
- std::vector<int> uses = GetUses();
- std::vector<InstructionNode*> ssa_uses;
- for (std::vector<int>::const_iterator cit = uses.begin(); cit != uses.end(); cit++) {
- ssa_uses.push_back((*definition_edges_.find(*cit)).second);
- }
- return ssa_uses;
- }
- std::map<int, InstructionNode* >* GetSSAProducersMap() {
- return &definition_edges_;
- }
- std::vector<InstructionNode*>* GetSSAConsumers() {
- return &used_in_;
- }
- virtual void AddSSAUse(InstructionNode* use) {
- used_in_.push_back(use);
- }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
- // Set the region to which this instruction belongs.
- Region* GetRegion() {
- DCHECK(NULL != region_);
- return region_;
- }
- // Get the region to which this instruction belongs.
- void SetRegion(Region* region) {
- region_ = region;
- }
-
- protected:
- explicit InstructionNode(const art::Instruction* in):
- SeaNode(), instruction_(in), used_in_(), de_def_(false), region_(NULL) { }
-
- protected:
- const art::Instruction* const instruction_;
- std::map<int, InstructionNode* > definition_edges_; // Maps used registers to their definitions.
- // Stores pointers to instructions that use the result of the current instruction.
- std::vector<InstructionNode*> used_in_;
- bool de_def_;
- Region* region_;
-};
-
-class ConstInstructionNode: public InstructionNode {
- public:
- explicit ConstInstructionNode(const art::Instruction* inst):
- InstructionNode(inst) { }
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- virtual int32_t GetConstValue() const {
- return GetInstruction()->VRegB_11n();
- }
-};
-
-class UnnamedConstInstructionNode: public ConstInstructionNode {
- public:
- explicit UnnamedConstInstructionNode(const art::Instruction* inst, int32_t value):
- ConstInstructionNode(inst), value_(value) { }
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- int GetResultRegister() const {
- return UNNAMED_CONST_REGISTER;
- }
-
- int32_t GetConstValue() const {
- return value_;
- }
-
- private:
- const int32_t value_;
-};
-
-class ReturnInstructionNode: public InstructionNode {
- public:
- explicit ReturnInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-class IfNeInstructionNode: public InstructionNode {
- public:
- explicit IfNeInstructionNode(const art::Instruction* inst): InstructionNode(inst) {
- DCHECK(InstructionTools::IsDefinition(inst) == false);
- }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-
-
-class MoveResultInstructionNode: public InstructionNode {
- public:
- explicit MoveResultInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
- std::vector<int> GetUses() const {
- std::vector<int> uses; // Using vector<> instead of set<> because order matters.
- uses.push_back(RETURN_REGISTER);
- return uses;
- }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-class InvokeStaticInstructionNode: public InstructionNode {
- public:
- explicit InvokeStaticInstructionNode(const art::Instruction* inst): InstructionNode(inst),
- method_index_(inst->VRegB_35c()) { }
- int GetResultRegister() const {
- return RETURN_REGISTER;
- }
-
- int GetCalledMethodIndex() const {
- return method_index_;
- }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- private:
- const uint32_t method_index_;
-};
-
-class AddIntInstructionNode: public InstructionNode {
- public:
- explicit AddIntInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-class AddIntLitInstructionNode: public AddIntInstructionNode {
- public:
- explicit AddIntLitInstructionNode(const art::Instruction* inst):
- AddIntInstructionNode(inst) { }
-
- std::vector<int> GetUses() const {
- std::vector<int> uses = AddIntInstructionNode::GetUses();
- uses.push_back(UNNAMED_CONST_REGISTER);
- return uses;
- }
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-class GotoInstructionNode: public InstructionNode {
- public:
- explicit GotoInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-
-class IfEqzInstructionNode: public InstructionNode {
- public:
- explicit IfEqzInstructionNode(const art::Instruction* inst): InstructionNode(inst) {
- DCHECK(InstructionTools::IsDefinition(inst) == false);
- }
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
diff --git a/compiler/sea_ir/ir/instruction_tools.cc b/compiler/sea_ir/ir/instruction_tools.cc
deleted file mode 100644
index 143209de75..0000000000
--- a/compiler/sea_ir/ir/instruction_tools.cc
+++ /dev/null
@@ -1,797 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "sea_ir/ir/instruction_tools.h"
-
-namespace sea_ir {
-
-bool InstructionTools::IsDefinition(const art::Instruction* const instruction) {
- if (0 != (InstructionTools::instruction_attributes_[instruction->Opcode()] & (1 << kDA))) {
- return true;
- }
- return false;
-}
-
-const int InstructionTools::instruction_attributes_[] = {
- // 00 NOP
- DF_NOP,
-
- // 01 MOVE vA, vB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 02 MOVE_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 03 MOVE_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_IS_MOVE,
-
- // 04 MOVE_WIDE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 05 MOVE_WIDE_FROM16 vAA, vBBBB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 06 MOVE_WIDE_16 vAAAA, vBBBB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_IS_MOVE,
-
- // 07 MOVE_OBJECT vA, vB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 09 MOVE_OBJECT_16 vAAAA, vBBBB
- DF_DA | DF_UB | DF_NULL_TRANSFER_0 | DF_IS_MOVE | DF_REF_A | DF_REF_B,
-
- // 0A MOVE_RESULT vAA
- DF_DA,
-
- // 0B MOVE_RESULT_WIDE vAA
- DF_DA | DF_A_WIDE,
-
- // 0C MOVE_RESULT_OBJECT vAA
- DF_DA | DF_REF_A,
-
- // 0D MOVE_EXCEPTION vAA
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 0E RETURN_VOID
- DF_NOP,
-
- // 0F RETURN vAA
- DF_UA,
-
- // 10 RETURN_WIDE vAA
- DF_UA | DF_A_WIDE,
-
- // 11 RETURN_OBJECT vAA
- DF_UA | DF_REF_A,
-
- // 12 CONST_4 vA, #+B
- DF_DA | DF_SETS_CONST,
-
- // 13 CONST_16 vAA, #+BBBB
- DF_DA | DF_SETS_CONST,
-
- // 14 CONST vAA, #+BBBBBBBB
- DF_DA | DF_SETS_CONST,
-
- // 15 CONST_HIGH16 VAA, #+BBBB0000
- DF_DA | DF_SETS_CONST,
-
- // 16 CONST_WIDE_16 vAA, #+BBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
- DF_DA | DF_A_WIDE | DF_SETS_CONST,
-
- // 1A CONST_STRING vAA, string@BBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1C CONST_CLASS vAA, type@BBBB
- DF_DA | DF_REF_A | DF_NON_NULL_DST,
-
- // 1D MONITOR_ENTER vAA
- DF_UA | DF_NULL_CHK_0 | DF_REF_A,
-
- // 1E MONITOR_EXIT vAA
- DF_UA | DF_NULL_CHK_0 | DF_REF_A,
-
- // 1F CHK_CAST vAA, type@BBBB
- DF_UA | DF_REF_A | DF_UMS,
-
- // 20 INSTANCE_OF vA, vB, type@CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_REF_B | DF_UMS,
-
- // 21 ARRAY_LENGTH vA, vB
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_CORE_A | DF_REF_B,
-
- // 22 NEW_INSTANCE vAA, type@BBBB
- DF_DA | DF_NON_NULL_DST | DF_REF_A | DF_UMS,
-
- // 23 NEW_ARRAY vA, vB, type@CCCC
- DF_DA | DF_UB | DF_NON_NULL_DST | DF_REF_A | DF_CORE_B | DF_UMS,
-
- // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NON_NULL_RET | DF_UMS,
-
- // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
- DF_FORMAT_3RC | DF_NON_NULL_RET | DF_UMS,
-
- // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
- DF_UA | DF_REF_A | DF_UMS,
-
- // 27 THROW vAA
- DF_UA | DF_REF_A | DF_UMS,
-
- // 28 GOTO
- DF_NOP,
-
- // 29 GOTO_16
- DF_NOP,
-
- // 2A GOTO_32
- DF_NOP,
-
- // 2B PACKED_SWITCH vAA, +BBBBBBBB
- DF_UA,
-
- // 2C SPARSE_SWITCH vAA, +BBBBBBBB
- DF_UA,
-
- // 2D CMPL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 2E CMPG_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 2F CMPL_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 30 CMPG_DOUBLE vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_B | DF_FP_C | DF_CORE_A,
-
- // 31 CMP_LONG vAA, vBB, vCC
- DF_DA | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 32 IF_EQ vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 33 IF_NE vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 34 IF_LT vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 35 IF_GE vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 36 IF_GT vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 37 IF_LE vA, vB, +CCCC
- DF_UA | DF_UB,
-
- // 38 IF_EQZ vAA, +BBBB
- DF_UA,
-
- // 39 IF_NEZ vAA, +BBBB
- DF_UA,
-
- // 3A IF_LTZ vAA, +BBBB
- DF_UA,
-
- // 3B IF_GEZ vAA, +BBBB
- DF_UA,
-
- // 3C IF_GTZ vAA, +BBBB
- DF_UA,
-
- // 3D IF_LEZ vAA, +BBBB
- DF_UA,
-
- // 3E UNUSED_3E
- DF_NOP,
-
- // 3F UNUSED_3F
- DF_NOP,
-
- // 40 UNUSED_40
- DF_NOP,
-
- // 41 UNUSED_41
- DF_NOP,
-
- // 42 UNUSED_42
- DF_NOP,
-
- // 43 UNUSED_43
- DF_NOP,
-
- // 44 AGET vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 45 AGET_WIDE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 46 AGET_OBJECT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_A | DF_REF_B | DF_CORE_C,
-
- // 47 AGET_BOOLEAN vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 48 AGET_BYTE vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 49 AGET_CHAR vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 4A AGET_SHORT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_NULL_CHK_0 | DF_RANGE_CHK_1 | DF_REF_B | DF_CORE_C,
-
- // 4B APUT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
- // 4C APUT_WIDE vAA, vBB, vCC
- DF_UA | DF_A_WIDE | DF_UB | DF_UC | DF_NULL_CHK_2 | DF_RANGE_CHK_3 | DF_REF_B | DF_CORE_C,
-
- // 4D APUT_OBJECT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_A | DF_REF_B | DF_CORE_C,
-
- // 4E APUT_BOOLEAN vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
- // 4F APUT_BYTE vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
- // 50 APUT_CHAR vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
- // 51 APUT_SHORT vAA, vBB, vCC
- DF_UA | DF_UB | DF_UC | DF_NULL_CHK_1 | DF_RANGE_CHK_2 | DF_REF_B | DF_CORE_C,
-
- // 52 IGET vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 53 IGET_WIDE vA, vB, field@CCCC
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 54 IGET_OBJECT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
-
- // 55 IGET_BOOLEAN vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 56 IGET_BYTE vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 57 IGET_CHAR vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 58 IGET_SHORT vA, vB, field@CCCC
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // 59 IPUT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // 5A IPUT_WIDE vA, vB, field@CCCC
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
-
- // 5B IPUT_OBJECT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
-
- // 5C IPUT_BOOLEAN vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // 5D IPUT_BYTE vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // 5E IPUT_CHAR vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // 5F IPUT_SHORT vA, vB, field@CCCC
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // 60 SGET vAA, field@BBBB
- DF_DA | DF_UMS,
-
- // 61 SGET_WIDE vAA, field@BBBB
- DF_DA | DF_A_WIDE | DF_UMS,
-
- // 62 SGET_OBJECT vAA, field@BBBB
- DF_DA | DF_REF_A | DF_UMS,
-
- // 63 SGET_BOOLEAN vAA, field@BBBB
- DF_DA | DF_UMS,
-
- // 64 SGET_BYTE vAA, field@BBBB
- DF_DA | DF_UMS,
-
- // 65 SGET_CHAR vAA, field@BBBB
- DF_DA | DF_UMS,
-
- // 66 SGET_SHORT vAA, field@BBBB
- DF_DA | DF_UMS,
-
- // 67 SPUT vAA, field@BBBB
- DF_UA | DF_UMS,
-
- // 68 SPUT_WIDE vAA, field@BBBB
- DF_UA | DF_A_WIDE | DF_UMS,
-
- // 69 SPUT_OBJECT vAA, field@BBBB
- DF_UA | DF_REF_A | DF_UMS,
-
- // 6A SPUT_BOOLEAN vAA, field@BBBB
- DF_UA | DF_UMS,
-
- // 6B SPUT_BYTE vAA, field@BBBB
- DF_UA | DF_UMS,
-
- // 6C SPUT_CHAR vAA, field@BBBB
- DF_UA | DF_UMS,
-
- // 6D SPUT_SHORT vAA, field@BBBB
- DF_UA | DF_UMS,
-
- // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_UMS,
-
- // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 73 UNUSED_73
- DF_NOP,
-
- // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_UMS,
-
- // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // 79 UNUSED_79
- DF_NOP,
-
- // 7A UNUSED_7A
- DF_NOP,
-
- // 7B NEG_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 7C NOT_INT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 7D NEG_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 7E NOT_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 7F NEG_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_FP_B,
-
- // 80 NEG_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 81 INT_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 82 INT_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_FP_A | DF_CORE_B,
-
- // 83 INT_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_CORE_B,
-
- // 84 LONG_TO_INT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // 85 LONG_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
- // 86 LONG_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_A | DF_CORE_B,
-
- // 87 FLOAT_TO_INT vA, vB
- DF_DA | DF_UB | DF_FP_B | DF_CORE_A,
-
- // 88 FLOAT_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_B | DF_CORE_A,
-
- // 89 FLOAT_TO_DOUBLE vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_FP_A | DF_FP_B,
-
- // 8A DOUBLE_TO_INT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
- // 8B DOUBLE_TO_LONG vA, vB
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_FP_B | DF_CORE_A,
-
- // 8C DOUBLE_TO_FLOAT vA, vB
- DF_DA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // 8D INT_TO_BYTE vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 8E INT_TO_CHAR vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 8F INT_TO_SHORT vA, vB
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // 90 ADD_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 91 SUB_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 92 MUL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 93 DIV_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 94 REM_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 95 AND_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 96 OR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 97 XOR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 98 SHL_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 99 SHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9A USHR_INT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9B ADD_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9C SUB_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9D MUL_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9E DIV_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // 9F REM_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A0 AND_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A1 OR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A2 XOR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A3 SHL_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A4 SHR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A5 USHR_LONG vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_CORE_A | DF_CORE_B | DF_CORE_C,
-
- // A6 ADD_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A7 SUB_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A8 MUL_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // A9 DIV_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AA REM_FLOAT vAA, vBB, vCC
- DF_DA | DF_UB | DF_UC | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AB ADD_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AC SUB_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AD MUL_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AE DIV_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // AF REM_DOUBLE vAA, vBB, vCC
- DF_DA | DF_A_WIDE | DF_UB | DF_B_WIDE | DF_UC | DF_C_WIDE | DF_FP_A | DF_FP_B | DF_FP_C,
-
- // B0 ADD_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B1 SUB_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B2 MUL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B3 DIV_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B4 REM_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B5 AND_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B6 OR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B7 XOR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B8 SHL_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // B9 SHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // BA USHR_INT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // BB ADD_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BC SUB_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BD MUL_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BE DIV_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // BF REM_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C0 AND_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C1 OR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C2 XOR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_CORE_A | DF_CORE_B,
-
- // C3 SHL_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C4 SHR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C5 USHR_LONG_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // C6 ADD_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C7 SUB_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C8 MUL_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // C9 DIV_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // CA REM_FLOAT_2ADDR vA, vB
- DF_DA | DF_UA | DF_UB | DF_FP_A | DF_FP_B,
-
- // CB ADD_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CC SUB_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CD MUL_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CE DIV_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // CF REM_DOUBLE_2ADDR vA, vB
- DF_DA | DF_A_WIDE | DF_UA | DF_UB | DF_B_WIDE | DF_FP_A | DF_FP_B,
-
- // D0 ADD_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D1 RSUB_INT vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D2 MUL_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D3 DIV_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D4 REM_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D5 AND_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D6 OR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D7 XOR_INT_LIT16 vA, vB, #+CCCC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D8 ADD_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DA MUL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DB DIV_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DC REM_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DD AND_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DE OR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // DF XOR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E0 SHL_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E1 SHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E2 USHR_INT_LIT8 vAA, vBB, #+CC
- DF_DA | DF_UB | DF_CORE_A | DF_CORE_B,
-
- // E3 IGET_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // E4 IPUT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_B,
-
- // E5 SGET_VOLATILE
- DF_DA | DF_UMS,
-
- // E6 SPUT_VOLATILE
- DF_UA | DF_UMS,
-
- // E7 IGET_OBJECT_VOLATILE
- DF_DA | DF_UB | DF_NULL_CHK_0 | DF_REF_A | DF_REF_B,
-
- // E8 IGET_WIDE_VOLATILE
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0 | DF_REF_B,
-
- // E9 IPUT_WIDE_VOLATILE
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2 | DF_REF_B,
-
- // EA SGET_WIDE_VOLATILE
- DF_DA | DF_A_WIDE | DF_UMS,
-
- // EB SPUT_WIDE_VOLATILE
- DF_UA | DF_A_WIDE | DF_UMS,
-
- // EC BREAKPOINT
- DF_NOP,
-
- // ED THROW_VERIFICATION_ERROR
- DF_NOP | DF_UMS,
-
- // EE EXECUTE_INLINE
- DF_FORMAT_35C,
-
- // EF EXECUTE_INLINE_RANGE
- DF_FORMAT_3RC,
-
- // F0 INVOKE_OBJECT_INIT_RANGE
- DF_NOP | DF_NULL_CHK_0,
-
- // F1 RETURN_VOID_BARRIER
- DF_NOP,
-
- // F2 IGET_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0,
-
- // F3 IGET_WIDE_QUICK
- DF_DA | DF_A_WIDE | DF_UB | DF_NULL_CHK_0,
-
- // F4 IGET_OBJECT_QUICK
- DF_DA | DF_UB | DF_NULL_CHK_0,
-
- // F5 IPUT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1,
-
- // F6 IPUT_WIDE_QUICK
- DF_UA | DF_A_WIDE | DF_UB | DF_NULL_CHK_2,
-
- // F7 IPUT_OBJECT_QUICK
- DF_UA | DF_UB | DF_NULL_CHK_1,
-
- // F8 INVOKE_VIRTUAL_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // F9 INVOKE_VIRTUAL_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FA INVOKE_SUPER_QUICK
- DF_FORMAT_35C | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FB INVOKE_SUPER_QUICK_RANGE
- DF_FORMAT_3RC | DF_NULL_CHK_OUT0 | DF_UMS,
-
- // FC IPUT_OBJECT_VOLATILE
- DF_UA | DF_UB | DF_NULL_CHK_1 | DF_REF_A | DF_REF_B,
-
- // FD SGET_OBJECT_VOLATILE
- DF_DA | DF_REF_A | DF_UMS,
-
- // FE SPUT_OBJECT_VOLATILE
- DF_UA | DF_REF_A | DF_UMS,
-
- // FF UNUSED_FF
- DF_NOP
-};
-} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/instruction_tools.h b/compiler/sea_ir/ir/instruction_tools.h
deleted file mode 100644
index 895e01732a..0000000000
--- a/compiler/sea_ir/ir/instruction_tools.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "sea.h"
-#include "dex_instruction.h"
-
-#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
-#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
-
-
-// Note: This file has content cannibalized for SEA_IR from the MIR implementation,
-// to avoid having a dependence on MIR.
-namespace sea_ir {
-
-#define DF_NOP 0
-#define DF_UA (1 << kUA)
-#define DF_UB (1 << kUB)
-#define DF_UC (1 << kUC)
-#define DF_A_WIDE (1 << kAWide)
-#define DF_B_WIDE (1 << kBWide)
-#define DF_C_WIDE (1 << kCWide)
-#define DF_DA (1 << kDA)
-#define DF_IS_MOVE (1 << kIsMove)
-#define DF_SETS_CONST (1 << kSetsConst)
-#define DF_FORMAT_35C (1 << kFormat35c)
-#define DF_FORMAT_3RC (1 << kFormat3rc)
-#define DF_NULL_CHK_0 (1 << kNullCheckSrc0)
-#define DF_NULL_CHK_1 (1 << kNullCheckSrc1)
-#define DF_NULL_CHK_2 (1 << kNullCheckSrc2)
-#define DF_NULL_CHK_OUT0 (1 << kNullCheckOut0)
-#define DF_NON_NULL_DST (1 << kDstNonNull)
-#define DF_NON_NULL_RET (1 << kRetNonNull)
-#define DF_NULL_TRANSFER_0 (1 << kNullTransferSrc0)
-#define DF_NULL_TRANSFER_N (1 << kNullTransferSrcN)
-#define DF_RANGE_CHK_1 (1 << kRangeCheckSrc1)
-#define DF_RANGE_CHK_2 (1 << kRangeCheckSrc2)
-#define DF_RANGE_CHK_3 (1 << kRangeCheckSrc3)
-#define DF_FP_A (1 << kFPA)
-#define DF_FP_B (1 << kFPB)
-#define DF_FP_C (1 << kFPC)
-#define DF_CORE_A (1 << kCoreA)
-#define DF_CORE_B (1 << kCoreB)
-#define DF_CORE_C (1 << kCoreC)
-#define DF_REF_A (1 << kRefA)
-#define DF_REF_B (1 << kRefB)
-#define DF_REF_C (1 << kRefC)
-#define DF_UMS (1 << kUsesMethodStar)
-
-#define DF_HAS_USES (DF_UA | DF_UB | DF_UC)
-
-#define DF_HAS_DEFS (DF_DA)
-
-#define DF_HAS_NULL_CHKS (DF_NULL_CHK_0 | \
- DF_NULL_CHK_1 | \
- DF_NULL_CHK_2 | \
- DF_NULL_CHK_OUT0)
-
-#define DF_HAS_RANGE_CHKS (DF_RANGE_CHK_1 | \
- DF_RANGE_CHK_2 | \
- DF_RANGE_CHK_3)
-
-#define DF_HAS_NR_CHKS (DF_HAS_NULL_CHKS | \
- DF_HAS_RANGE_CHKS)
-
-#define DF_A_IS_REG (DF_UA | DF_DA)
-#define DF_B_IS_REG (DF_UB)
-#define DF_C_IS_REG (DF_UC)
-#define DF_IS_GETTER_OR_SETTER (DF_IS_GETTER | DF_IS_SETTER)
-#define DF_USES_FP (DF_FP_A | DF_FP_B | DF_FP_C)
-
-enum DataFlowAttributePos {
- kUA = 0,
- kUB,
- kUC,
- kAWide,
- kBWide,
- kCWide,
- kDA,
- kIsMove,
- kSetsConst,
- kFormat35c,
- kFormat3rc,
- kNullCheckSrc0, // Null check of uses[0].
- kNullCheckSrc1, // Null check of uses[1].
- kNullCheckSrc2, // Null check of uses[2].
- kNullCheckOut0, // Null check out outgoing arg0.
- kDstNonNull, // May assume dst is non-null.
- kRetNonNull, // May assume retval is non-null.
- kNullTransferSrc0, // Object copy src[0] -> dst.
- kNullTransferSrcN, // Phi null check state transfer.
- kRangeCheckSrc1, // Range check of uses[1].
- kRangeCheckSrc2, // Range check of uses[2].
- kRangeCheckSrc3, // Range check of uses[3].
- kFPA,
- kFPB,
- kFPC,
- kCoreA,
- kCoreB,
- kCoreC,
- kRefA,
- kRefB,
- kRefC,
- kUsesMethodStar, // Implicit use of Method*.
-};
-
-class InstructionTools {
- public:
- static bool IsDefinition(const art::Instruction* instruction);
- static const int instruction_attributes_[];
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
diff --git a/compiler/sea_ir/ir/regions_test.cc b/compiler/sea_ir/ir/regions_test.cc
deleted file mode 100644
index 95bd31075e..0000000000
--- a/compiler/sea_ir/ir/regions_test.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_compiler_test.h"
-#include "sea_ir/ir/sea.h"
-
-using utils::ScopedHashtable;
-
-namespace sea_ir {
-
-class RegionsTest : public art::CommonCompilerTest {};
-
-TEST_F(RegionsTest, Basics) {
- sea_ir::SeaGraph sg(*java_lang_dex_file_);
- sea_ir::Region* root = sg.GetNewRegion();
- sea_ir::Region* then_region = sg.GetNewRegion();
- sea_ir::Region* else_region = sg.GetNewRegion();
- std::vector<sea_ir::Region*>* regions = sg.GetRegions();
- // Test that regions have been registered correctly as children of the graph.
- EXPECT_TRUE(std::find(regions->begin(), regions->end(), root) != regions->end());
- EXPECT_TRUE(std::find(regions->begin(), regions->end(), then_region) != regions->end());
- EXPECT_TRUE(std::find(regions->begin(), regions->end(), else_region) != regions->end());
- // Check that an edge recorded correctly in both the head and the tail.
- sg.AddEdge(root, then_region);
- std::vector<sea_ir::Region*>* succs = root->GetSuccessors();
- EXPECT_EQ(1U, succs->size());
- EXPECT_EQ(then_region, succs->at(0));
- std::vector<sea_ir::Region*>* preds = then_region->GetPredecessors();
- EXPECT_EQ(1U, preds->size());
- EXPECT_EQ(root, preds->at(0));
- // Check that two edges are recorded properly for both head and tail.
- sg.AddEdge(root, else_region);
- succs = root->GetSuccessors();
- EXPECT_EQ(2U, succs->size());
- EXPECT_TRUE(std::find(succs->begin(), succs->end(), then_region) != succs->end());
- EXPECT_TRUE(std::find(succs->begin(), succs->end(), else_region) != succs->end());
- preds = then_region->GetPredecessors();
- EXPECT_EQ(1U, preds->size());
- EXPECT_EQ(root, preds->at(0));
- preds = else_region->GetPredecessors();
- EXPECT_EQ(1U, preds->size());
- EXPECT_EQ(root, preds->at(0));
-}
-
-} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/sea.cc b/compiler/sea_ir/ir/sea.cc
deleted file mode 100644
index 2b25f568d1..0000000000
--- a/compiler/sea_ir/ir/sea.cc
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "base/stringprintf.h"
-#include "sea_ir/ir/instruction_tools.h"
-#include "sea_ir/ir/sea.h"
-#include "sea_ir/code_gen/code_gen.h"
-#include "sea_ir/types/type_inference.h"
-
-#define MAX_REACHING_DEF_ITERERATIONS (10)
-// TODO: When development is done, this define should not
-// be needed, it is currently used as a cutoff
-// for cases where the iterative fixed point algorithm
-// does not reach a fixed point because of a bug.
-
-namespace sea_ir {
-
-int SeaNode::current_max_node_id_ = 0;
-
-void IRVisitor::Traverse(Region* region) {
- std::vector<PhiInstructionNode*>* phis = region->GetPhiNodes();
- for (std::vector<PhiInstructionNode*>::const_iterator cit = phis->begin();
- cit != phis->end(); cit++) {
- (*cit)->Accept(this);
- }
- std::vector<InstructionNode*>* instructions = region->GetInstructions();
- for (std::vector<InstructionNode*>::const_iterator cit = instructions->begin();
- cit != instructions->end(); cit++) {
- (*cit)->Accept(this);
- }
-}
-
-void IRVisitor::Traverse(SeaGraph* graph) {
- for (std::vector<Region*>::const_iterator cit = ordered_regions_.begin();
- cit != ordered_regions_.end(); cit++ ) {
- (*cit)->Accept(this);
- }
-}
-
-SeaGraph* SeaGraph::GetGraph(const art::DexFile& dex_file) {
- return new SeaGraph(dex_file);
-}
-
-void SeaGraph::AddEdge(Region* src, Region* dst) const {
- src->AddSuccessor(dst);
- dst->AddPredecessor(src);
-}
-
-void SeaGraph::ComputeRPO(Region* current_region, int& current_rpo) {
- current_region->SetRPO(VISITING);
- std::vector<sea_ir::Region*>* succs = current_region->GetSuccessors();
- for (std::vector<sea_ir::Region*>::iterator succ_it = succs->begin();
- succ_it != succs->end(); ++succ_it) {
- if (NOT_VISITED == (*succ_it)->GetRPO()) {
- SeaGraph::ComputeRPO(*succ_it, current_rpo);
- }
- }
- current_region->SetRPO(current_rpo--);
-}
-
-void SeaGraph::ComputeIDominators() {
- bool changed = true;
- while (changed) {
- changed = false;
- // Entry node has itself as IDOM.
- std::vector<Region*>::iterator crt_it;
- std::set<Region*> processedNodes;
- // Find and mark the entry node(s).
- for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
- if ((*crt_it)->GetPredecessors()->size() == 0) {
- processedNodes.insert(*crt_it);
- (*crt_it)->SetIDominator(*crt_it);
- }
- }
- for (crt_it = regions_.begin(); crt_it != regions_.end(); ++crt_it) {
- if ((*crt_it)->GetPredecessors()->size() == 0) {
- continue;
- }
- // NewIDom = first (processed) predecessor of b.
- Region* new_dom = NULL;
- std::vector<Region*>* preds = (*crt_it)->GetPredecessors();
- DCHECK(NULL != preds);
- Region* root_pred = NULL;
- for (std::vector<Region*>::iterator pred_it = preds->begin();
- pred_it != preds->end(); ++pred_it) {
- if (processedNodes.end() != processedNodes.find((*pred_it))) {
- root_pred = *pred_it;
- new_dom = root_pred;
- break;
- }
- }
- // For all other predecessors p of b, if idom is not set,
- // then NewIdom = Intersect(p, NewIdom)
- for (std::vector<Region*>::const_iterator pred_it = preds->begin();
- pred_it != preds->end(); ++pred_it) {
- DCHECK(NULL != *pred_it);
- // if IDOMS[p] != UNDEFINED
- if ((*pred_it != root_pred) && (*pred_it)->GetIDominator() != NULL) {
- DCHECK(NULL != new_dom);
- new_dom = SeaGraph::Intersect(*pred_it, new_dom);
- }
- }
- DCHECK(NULL != *crt_it);
- if ((*crt_it)->GetIDominator() != new_dom) {
- (*crt_it)->SetIDominator(new_dom);
- changed = true;
- }
- processedNodes.insert(*crt_it);
- }
- }
-
- // For easily ordering of regions we need edges dominator->dominated.
- for (std::vector<Region*>::iterator region_it = regions_.begin();
- region_it != regions_.end(); region_it++) {
- Region* idom = (*region_it)->GetIDominator();
- if (idom != *region_it) {
- idom->AddToIDominatedSet(*region_it);
- }
- }
-}
-
-Region* SeaGraph::Intersect(Region* i, Region* j) {
- Region* finger1 = i;
- Region* finger2 = j;
- while (finger1 != finger2) {
- while (finger1->GetRPO() > finger2->GetRPO()) {
- DCHECK(NULL != finger1);
- finger1 = finger1->GetIDominator(); // should have: finger1 != NULL
- DCHECK(NULL != finger1);
- }
- while (finger1->GetRPO() < finger2->GetRPO()) {
- DCHECK(NULL != finger2);
- finger2 = finger2->GetIDominator(); // should have: finger1 != NULL
- DCHECK(NULL != finger2);
- }
- }
- return finger1; // finger1 should be equal to finger2 at this point.
-}
-
-void SeaGraph::ComputeDownExposedDefs() {
- for (std::vector<Region*>::iterator region_it = regions_.begin();
- region_it != regions_.end(); region_it++) {
- (*region_it)->ComputeDownExposedDefs();
- }
-}
-
-void SeaGraph::ComputeReachingDefs() {
- // Iterate until the reaching definitions set doesn't change anymore.
- // (See Cooper & Torczon, "Engineering a Compiler", second edition, page 487)
- bool changed = true;
- int iteration = 0;
- while (changed && (iteration < MAX_REACHING_DEF_ITERERATIONS)) {
- iteration++;
- changed = false;
- // TODO: optimize the ordering if this becomes performance bottleneck.
- for (std::vector<Region*>::iterator regions_it = regions_.begin();
- regions_it != regions_.end();
- regions_it++) {
- changed |= (*regions_it)->UpdateReachingDefs();
- }
- }
- DCHECK(!changed) << "Reaching definitions computation did not reach a fixed point.";
-}
-
-void SeaGraph::InsertSignatureNodes(const art::DexFile::CodeItem* code_item, Region* r) {
- // Insert a fake SignatureNode for the first parameter.
- // TODO: Provide a register enum value for the fake parameter.
- SignatureNode* parameter_def_node = new sea_ir::SignatureNode(0, 0);
- AddParameterNode(parameter_def_node);
- r->AddChild(parameter_def_node);
- // Insert SignatureNodes for each Dalvik register parameter.
- for (unsigned int crt_offset = 0; crt_offset < code_item->ins_size_; crt_offset++) {
- int register_no = code_item->registers_size_ - crt_offset - 1;
- int position = crt_offset + 1;
- SignatureNode* parameter_def_node = new sea_ir::SignatureNode(register_no, position);
- AddParameterNode(parameter_def_node);
- r->AddChild(parameter_def_node);
- }
-}
-
-void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
- const art::DexFile& dex_file, uint16_t class_def_idx,
- uint32_t method_idx, uint32_t method_access_flags) {
- code_item_ = code_item;
- class_def_idx_ = class_def_idx;
- method_idx_ = method_idx;
- method_access_flags_ = method_access_flags;
- const uint16_t* code = code_item->insns_;
- const size_t size_in_code_units = code_item->insns_size_in_code_units_;
- // This maps target instruction pointers to their corresponding region objects.
- std::map<const uint16_t*, Region*> target_regions;
- size_t i = 0;
- // Pass: Find the start instruction of basic blocks
- // by locating targets and flow-though instructions of branches.
- while (i < size_in_code_units) {
- const art::Instruction* inst = art::Instruction::At(&code[i]);
- if (inst->IsBranch() || inst->IsUnconditional()) {
- int32_t offset = inst->GetTargetOffset();
- if (target_regions.end() == target_regions.find(&code[i + offset])) {
- Region* region = GetNewRegion();
- target_regions.insert(std::pair<const uint16_t*, Region*>(&code[i + offset], region));
- }
- if (inst->CanFlowThrough()
- && (target_regions.end() == target_regions.find(&code[i + inst->SizeInCodeUnits()]))) {
- Region* region = GetNewRegion();
- target_regions.insert(
- std::pair<const uint16_t*, Region*>(&code[i + inst->SizeInCodeUnits()], region));
- }
- }
- i += inst->SizeInCodeUnits();
- }
-
-
- Region* r = GetNewRegion();
-
- InsertSignatureNodes(code_item, r);
- // Pass: Assign instructions to region nodes and
- // assign branches their control flow successors.
- i = 0;
- sea_ir::InstructionNode* last_node = NULL;
- sea_ir::InstructionNode* node = NULL;
- while (i < size_in_code_units) {
- const art::Instruction* inst = art::Instruction::At(&code[i]);
- std::vector<InstructionNode*> sea_instructions_for_dalvik =
- sea_ir::InstructionNode::Create(inst);
- for (std::vector<InstructionNode*>::const_iterator cit = sea_instructions_for_dalvik.begin();
- sea_instructions_for_dalvik.end() != cit; ++cit) {
- last_node = node;
- node = *cit;
-
- if (inst->IsBranch() || inst->IsUnconditional()) {
- int32_t offset = inst->GetTargetOffset();
- std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i + offset]);
- DCHECK(it != target_regions.end());
- AddEdge(r, it->second); // Add edge to branch target.
- }
- std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i]);
- if (target_regions.end() != it) {
- // Get the already created region because this is a branch target.
- Region* nextRegion = it->second;
- if (last_node->GetInstruction()->IsBranch()
- && last_node->GetInstruction()->CanFlowThrough()) {
- AddEdge(r, it->second); // Add flow-through edge.
- }
- r = nextRegion;
- }
- r->AddChild(node);
- }
- i += inst->SizeInCodeUnits();
- }
-}
-
-void SeaGraph::ComputeRPO() {
- int rpo_id = regions_.size() - 1;
- for (std::vector<Region*>::const_iterator crt_it = regions_.begin(); crt_it != regions_.end();
- ++crt_it) {
- if ((*crt_it)->GetPredecessors()->size() == 0) {
- ComputeRPO(*crt_it, rpo_id);
- }
- }
-}
-
-// Performs the renaming phase in traditional SSA transformations.
-// See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
-void SeaGraph::RenameAsSSA() {
- utils::ScopedHashtable<int, InstructionNode*> scoped_table;
- scoped_table.OpenScope();
- for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
- region_it++) {
- if ((*region_it)->GetIDominator() == *region_it) {
- RenameAsSSA(*region_it, &scoped_table);
- }
- }
- scoped_table.CloseScope();
-}
-
-void SeaGraph::ConvertToSSA() {
- // Pass: find global names.
- // The map @block maps registers to the blocks in which they are defined.
- std::map<int, std::set<Region*>> blocks;
- // The set @globals records registers whose use
- // is in a different block than the corresponding definition.
- std::set<int> globals;
- for (std::vector<Region*>::iterator region_it = regions_.begin(); region_it != regions_.end();
- region_it++) {
- std::set<int> var_kill;
- std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
- for (std::vector<InstructionNode*>::iterator inst_it = instructions->begin();
- inst_it != instructions->end(); inst_it++) {
- std::vector<int> used_regs = (*inst_it)->GetUses();
- for (std::size_t i = 0; i < used_regs.size(); i++) {
- int used_reg = used_regs[i];
- if (var_kill.find(used_reg) == var_kill.end()) {
- globals.insert(used_reg);
- }
- }
- const int reg_def = (*inst_it)->GetResultRegister();
- if (reg_def != NO_REGISTER) {
- var_kill.insert(reg_def);
- }
-
- blocks.insert(std::pair<int, std::set<Region*>>(reg_def, std::set<Region*>()));
- std::set<Region*>* reg_def_blocks = &(blocks.find(reg_def)->second);
- reg_def_blocks->insert(*region_it);
- }
- }
-
- // Pass: Actually add phi-nodes to regions.
- for (std::set<int>::const_iterator globals_it = globals.begin();
- globals_it != globals.end(); globals_it++) {
- int global = *globals_it;
- // Copy the set, because we will modify the worklist as we go.
- std::set<Region*> worklist((*(blocks.find(global))).second);
- for (std::set<Region*>::const_iterator b_it = worklist.begin();
- b_it != worklist.end(); b_it++) {
- std::set<Region*>* df = (*b_it)->GetDominanceFrontier();
- for (std::set<Region*>::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) {
- if ((*df_it)->InsertPhiFor(global)) {
- // Check that the dominance frontier element is in the worklist already
- // because we only want to break if the element is actually not there yet.
- if (worklist.find(*df_it) == worklist.end()) {
- worklist.insert(*df_it);
- b_it = worklist.begin();
- break;
- }
- }
- }
- }
- }
- // Pass: Build edges to the definition corresponding to each use.
- // (This corresponds to the renaming phase in traditional SSA transformations.
- // See: Cooper & Torczon, "Engineering a Compiler", second edition, page 505.)
- RenameAsSSA();
-}
-
-void SeaGraph::RenameAsSSA(Region* crt_region,
- utils::ScopedHashtable<int, InstructionNode*>* scoped_table) {
- scoped_table->OpenScope();
- // Rename phi nodes defined in the current region.
- std::vector<PhiInstructionNode*>* phis = crt_region->GetPhiNodes();
- for (std::vector<PhiInstructionNode*>::iterator phi_it = phis->begin();
- phi_it != phis->end(); phi_it++) {
- int reg_no = (*phi_it)->GetRegisterNumber();
- scoped_table->Add(reg_no, (*phi_it));
- }
- // Rename operands of instructions from the current region.
- std::vector<InstructionNode*>* instructions = crt_region->GetInstructions();
- for (std::vector<InstructionNode*>::const_iterator instructions_it = instructions->begin();
- instructions_it != instructions->end(); instructions_it++) {
- InstructionNode* current_instruction = (*instructions_it);
- // Rename uses.
- std::vector<int> used_regs = current_instruction->GetUses();
- for (std::vector<int>::const_iterator reg_it = used_regs.begin();
- reg_it != used_regs.end(); reg_it++) {
- int current_used_reg = (*reg_it);
- InstructionNode* definition = scoped_table->Lookup(current_used_reg);
- current_instruction->RenameToSSA(current_used_reg, definition);
- }
- // Update scope table with latest definitions.
- std::vector<int> def_regs = current_instruction->GetDefinitions();
- for (std::vector<int>::const_iterator reg_it = def_regs.begin();
- reg_it != def_regs.end(); reg_it++) {
- int current_defined_reg = (*reg_it);
- scoped_table->Add(current_defined_reg, current_instruction);
- }
- }
- // Fill in uses of phi functions in CFG successor regions.
- const std::vector<Region*>* successors = crt_region->GetSuccessors();
- for (std::vector<Region*>::const_iterator successors_it = successors->begin();
- successors_it != successors->end(); successors_it++) {
- Region* successor = (*successors_it);
- successor->SetPhiDefinitionsForUses(scoped_table, crt_region);
- }
-
- // Rename all successors in the dominators tree.
- const std::set<Region*>* dominated_nodes = crt_region->GetIDominatedSet();
- for (std::set<Region*>::const_iterator dominated_nodes_it = dominated_nodes->begin();
- dominated_nodes_it != dominated_nodes->end(); dominated_nodes_it++) {
- Region* dominated_node = (*dominated_nodes_it);
- RenameAsSSA(dominated_node, scoped_table);
- }
- scoped_table->CloseScope();
-}
-
-CodeGenData* SeaGraph::GenerateLLVM(const std::string& function_name,
- const art::DexFile& dex_file) {
- // Pass: Generate LLVM IR.
- CodeGenPrepassVisitor code_gen_prepass_visitor(function_name);
- std::cout << "Generating code..." << std::endl;
- Accept(&code_gen_prepass_visitor);
- CodeGenVisitor code_gen_visitor(code_gen_prepass_visitor.GetData(), dex_file);
- Accept(&code_gen_visitor);
- CodeGenPostpassVisitor code_gen_postpass_visitor(code_gen_visitor.GetData());
- Accept(&code_gen_postpass_visitor);
- return code_gen_postpass_visitor.GetData();
-}
-
-CodeGenData* SeaGraph::CompileMethod(
- const std::string& function_name,
- const art::DexFile::CodeItem* code_item, uint16_t class_def_idx,
- uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file) {
- // Two passes: Builds the intermediate structure (non-SSA) of the sea-ir for the function.
- BuildMethodSeaGraph(code_item, dex_file, class_def_idx, method_idx, method_access_flags);
- // Pass: Compute reverse post-order of regions.
- ComputeRPO();
- // Multiple passes: compute immediate dominators.
- ComputeIDominators();
- // Pass: compute downward-exposed definitions.
- ComputeDownExposedDefs();
- // Multiple Passes (iterative fixed-point algorithm): Compute reaching definitions
- ComputeReachingDefs();
- // Pass (O(nlogN)): Compute the dominance frontier for region nodes.
- ComputeDominanceFrontier();
- // Two Passes: Phi node insertion.
- ConvertToSSA();
- // Pass: type inference
- ti_->ComputeTypes(this);
- // Pass: Generate LLVM IR.
- CodeGenData* cgd = GenerateLLVM(function_name, dex_file);
- return cgd;
-}
-
-void SeaGraph::ComputeDominanceFrontier() {
- for (std::vector<Region*>::iterator region_it = regions_.begin();
- region_it != regions_.end(); region_it++) {
- std::vector<Region*>* preds = (*region_it)->GetPredecessors();
- if (preds->size() > 1) {
- for (std::vector<Region*>::iterator pred_it = preds->begin();
- pred_it != preds->end(); pred_it++) {
- Region* runner = *pred_it;
- while (runner != (*region_it)->GetIDominator()) {
- runner->AddToDominanceFrontier(*region_it);
- runner = runner->GetIDominator();
- }
- }
- }
- }
-}
-
-Region* SeaGraph::GetNewRegion() {
- Region* new_region = new Region();
- AddRegion(new_region);
- return new_region;
-}
-
-void SeaGraph::AddRegion(Region* r) {
- DCHECK(r) << "Tried to add NULL region to SEA graph.";
- regions_.push_back(r);
-}
-
-SeaGraph::SeaGraph(const art::DexFile& df)
- :ti_(new TypeInference()), class_def_idx_(0), method_idx_(0), method_access_flags_(),
- regions_(), parameters_(), dex_file_(df), code_item_(NULL) { }
-
-void Region::AddChild(sea_ir::InstructionNode* instruction) {
- DCHECK(instruction) << "Tried to add NULL instruction to region node.";
- instructions_.push_back(instruction);
- instruction->SetRegion(this);
-}
-
-SeaNode* Region::GetLastChild() const {
- if (instructions_.size() > 0) {
- return instructions_.back();
- }
- return NULL;
-}
-
-void Region::ComputeDownExposedDefs() {
- for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
- inst_it != instructions_.end(); inst_it++) {
- int reg_no = (*inst_it)->GetResultRegister();
- std::map<int, InstructionNode*>::iterator res = de_defs_.find(reg_no);
- if ((reg_no != NO_REGISTER) && (res == de_defs_.end())) {
- de_defs_.insert(std::pair<int, InstructionNode*>(reg_no, *inst_it));
- } else {
- res->second = *inst_it;
- }
- }
- for (std::map<int, sea_ir::InstructionNode*>::const_iterator cit = de_defs_.begin();
- cit != de_defs_.end(); cit++) {
- (*cit).second->MarkAsDEDef();
- }
-}
-
-const std::map<int, sea_ir::InstructionNode*>* Region::GetDownExposedDefs() const {
- return &de_defs_;
-}
-
-std::map<int, std::set<sea_ir::InstructionNode*>* >* Region::GetReachingDefs() {
- return &reaching_defs_;
-}
-
-bool Region::UpdateReachingDefs() {
- std::map<int, std::set<sea_ir::InstructionNode*>* > new_reaching;
- for (std::vector<Region*>::const_iterator pred_it = predecessors_.begin();
- pred_it != predecessors_.end(); pred_it++) {
- // The reaching_defs variable will contain reaching defs __for current predecessor only__
- std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs;
- std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching =
- (*pred_it)->GetReachingDefs();
- const std::map<int, InstructionNode*>* de_defs = (*pred_it)->GetDownExposedDefs();
-
- // The definitions from the reaching set of the predecessor
- // may be shadowed by downward exposed definitions from the predecessor,
- // otherwise the defs from the reaching set are still good.
- for (std::map<int, InstructionNode*>::const_iterator de_def = de_defs->begin();
- de_def != de_defs->end(); de_def++) {
- std::set<InstructionNode*>* solo_def;
- solo_def = new std::set<InstructionNode*>();
- solo_def->insert(de_def->second);
- reaching_defs.insert(
- std::pair<int const, std::set<InstructionNode*>*>(de_def->first, solo_def));
- }
- reaching_defs.insert(pred_reaching->begin(), pred_reaching->end());
-
- // Now we combine the reaching map coming from the current predecessor (reaching_defs)
- // with the accumulated set from all predecessors so far (from new_reaching).
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
- reaching_defs.begin();
- for (; reaching_it != reaching_defs.end(); reaching_it++) {
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator crt_entry =
- new_reaching.find(reaching_it->first);
- if (new_reaching.end() != crt_entry) {
- crt_entry->second->insert(reaching_it->second->begin(), reaching_it->second->end());
- } else {
- new_reaching.insert(
- std::pair<int, std::set<sea_ir::InstructionNode*>*>(
- reaching_it->first,
- reaching_it->second) );
- }
- }
- }
- bool changed = false;
- // Because the sets are monotonically increasing,
- // we can compare sizes instead of using set comparison.
- // TODO: Find formal proof.
- int old_size = 0;
- if (-1 == reaching_defs_size_) {
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
- reaching_defs_.begin();
- for (; reaching_it != reaching_defs_.end(); reaching_it++) {
- old_size += (*reaching_it).second->size();
- }
- } else {
- old_size = reaching_defs_size_;
- }
- int new_size = 0;
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = new_reaching.begin();
- for (; reaching_it != new_reaching.end(); reaching_it++) {
- new_size += (*reaching_it).second->size();
- }
- if (old_size != new_size) {
- changed = true;
- }
- if (changed) {
- reaching_defs_ = new_reaching;
- reaching_defs_size_ = new_size;
- }
- return changed;
-}
-
-bool Region::InsertPhiFor(int reg_no) {
- if (!ContainsPhiFor(reg_no)) {
- phi_set_.insert(reg_no);
- PhiInstructionNode* new_phi = new PhiInstructionNode(reg_no);
- new_phi->SetRegion(this);
- phi_instructions_.push_back(new_phi);
- return true;
- }
- return false;
-}
-
-void Region::SetPhiDefinitionsForUses(
- const utils::ScopedHashtable<int, InstructionNode*>* scoped_table, Region* predecessor) {
- int predecessor_id = -1;
- for (unsigned int crt_pred_id = 0; crt_pred_id < predecessors_.size(); crt_pred_id++) {
- if (predecessors_.at(crt_pred_id) == predecessor) {
- predecessor_id = crt_pred_id;
- }
- }
- DCHECK_NE(-1, predecessor_id);
- for (std::vector<PhiInstructionNode*>::iterator phi_it = phi_instructions_.begin();
- phi_it != phi_instructions_.end(); phi_it++) {
- PhiInstructionNode* phi = (*phi_it);
- int reg_no = phi->GetRegisterNumber();
- InstructionNode* definition = scoped_table->Lookup(reg_no);
- phi->RenameToSSA(reg_no, definition, predecessor_id);
- }
-}
-
-std::vector<InstructionNode*> InstructionNode::Create(const art::Instruction* in) {
- std::vector<InstructionNode*> sea_instructions;
- switch (in->Opcode()) {
- case art::Instruction::CONST_4:
- sea_instructions.push_back(new ConstInstructionNode(in));
- break;
- case art::Instruction::RETURN:
- sea_instructions.push_back(new ReturnInstructionNode(in));
- break;
- case art::Instruction::IF_NE:
- sea_instructions.push_back(new IfNeInstructionNode(in));
- break;
- case art::Instruction::ADD_INT_LIT8:
- sea_instructions.push_back(new UnnamedConstInstructionNode(in, in->VRegC_22b()));
- sea_instructions.push_back(new AddIntLitInstructionNode(in));
- break;
- case art::Instruction::MOVE_RESULT:
- sea_instructions.push_back(new MoveResultInstructionNode(in));
- break;
- case art::Instruction::INVOKE_STATIC:
- sea_instructions.push_back(new InvokeStaticInstructionNode(in));
- break;
- case art::Instruction::ADD_INT:
- sea_instructions.push_back(new AddIntInstructionNode(in));
- break;
- case art::Instruction::GOTO:
- sea_instructions.push_back(new GotoInstructionNode(in));
- break;
- case art::Instruction::IF_EQZ:
- sea_instructions.push_back(new IfEqzInstructionNode(in));
- break;
- default:
- // Default, generic IR instruction node; default case should never be reached
- // when support for all instructions ahs been added.
- sea_instructions.push_back(new InstructionNode(in));
- }
- return sea_instructions;
-}
-
-void InstructionNode::MarkAsDEDef() {
- de_def_ = true;
-}
-
-int InstructionNode::GetResultRegister() const {
- if (instruction_->HasVRegA() && InstructionTools::IsDefinition(instruction_)) {
- return instruction_->VRegA();
- }
- return NO_REGISTER;
-}
-
-std::vector<int> InstructionNode::GetDefinitions() const {
- // TODO: Extend this to handle instructions defining more than one register (if any)
- // The return value should be changed to pointer to field then; for now it is an object
- // so that we avoid possible memory leaks from allocating objects dynamically.
- std::vector<int> definitions;
- int result = GetResultRegister();
- if (NO_REGISTER != result) {
- definitions.push_back(result);
- }
- return definitions;
-}
-
-std::vector<int> InstructionNode::GetUses() const {
- std::vector<int> uses; // Using vector<> instead of set<> because order matters.
- if (!InstructionTools::IsDefinition(instruction_) && (instruction_->HasVRegA())) {
- int vA = instruction_->VRegA();
- uses.push_back(vA);
- }
- if (instruction_->HasVRegB()) {
- int vB = instruction_->VRegB();
- uses.push_back(vB);
- }
- if (instruction_->HasVRegC()) {
- int vC = instruction_->VRegC();
- uses.push_back(vC);
- }
- return uses;
-}
-} // namespace sea_ir
diff --git a/compiler/sea_ir/ir/sea.h b/compiler/sea_ir/ir/sea.h
deleted file mode 100644
index 26b16be019..0000000000
--- a/compiler/sea_ir/ir/sea.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ART_COMPILER_SEA_IR_IR_SEA_H_
-#define ART_COMPILER_SEA_IR_IR_SEA_H_
-
-#include <set>
-#include <map>
-
-#include "utils/scoped_hashtable.h"
-#include "gtest/gtest_prod.h"
-#include "dex_file.h"
-#include "dex_instruction.h"
-#include "sea_ir/ir/instruction_tools.h"
-#include "sea_ir/ir/instruction_nodes.h"
-
-namespace sea_ir {
-
-// Reverse post-order numbering constants
-enum RegionNumbering {
- NOT_VISITED = -1,
- VISITING = -2
-};
-
-class TypeInference;
-class CodeGenData;
-
-class Region;
-class InstructionNode;
-class PhiInstructionNode;
-class SignatureNode;
-
-// A SignatureNode is a declaration of one parameter in the function signature.
-// This class is used to provide place-holder definitions to which instructions
-// can return from the GetSSAUses() calls, instead of having missing SSA edges.
-class SignatureNode: public InstructionNode {
- public:
- // Creates a new signature node representing the initial definition of the
- // register @register_no which is the @signature_position-th argument to the method.
- explicit SignatureNode(unsigned int register_no, unsigned int signature_position):
- InstructionNode(NULL), register_no_(register_no), position_(signature_position) { }
-
- int GetResultRegister() const {
- return register_no_;
- }
-
- unsigned int GetPositionInSignature() const {
- return position_;
- }
-
- std::vector<int> GetUses() const {
- return std::vector<int>();
- }
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- private:
- const unsigned int register_no_;
- const unsigned int position_; // The position of this parameter node is
- // in the function parameter list.
-};
-
-class PhiInstructionNode: public InstructionNode {
- public:
- explicit PhiInstructionNode(int register_no):
- InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
- // Returns the register on which this phi-function is used.
- int GetRegisterNumber() const {
- return register_no_;
- }
-
- // Renames the use of @reg_no to refer to the instruction @definition.
- // Phi-functions are different than normal instructions in that they
- // have multiple predecessor regions; this is why RenameToSSA has
- // the additional parameter specifying that @parameter_id is the incoming
- // edge for @definition, essentially creating SSA form.
- void RenameToSSA(int reg_no, InstructionNode* definition, unsigned int predecessor_id) {
- DCHECK(NULL != definition) << "Tried to rename to SSA using a NULL definition for "
- << StringId() << " register " << reg_no;
- if (definition_edges_.size() < predecessor_id+1) {
- definition_edges_.resize(predecessor_id+1, NULL);
- }
- if (NULL == definition_edges_.at(predecessor_id)) {
- definition_edges_[predecessor_id] = new std::vector<InstructionNode*>();
- }
- definition_edges_[predecessor_id]->push_back(definition);
- definition->AddSSAUse(this);
- }
-
- // Returns the ordered set of Instructions that define the input operands of this instruction.
- // Precondition: SeaGraph.ConvertToSSA().
- std::vector<InstructionNode*> GetSSAProducers() {
- std::vector<InstructionNode*> producers;
- for (std::vector<std::vector<InstructionNode*>*>::const_iterator
- cit = definition_edges_.begin(); cit != definition_edges_.end(); cit++) {
- producers.insert(producers.end(), (*cit)->begin(), (*cit)->end());
- }
- return producers;
- }
-
- // Returns the instruction that defines the phi register from predecessor
- // on position @predecessor_pos. Note that the return value is vector<> just
- // for consistency with the return value of GetSSAUses() on regular instructions,
- // The returned vector should always have a single element because the IR is SSA.
- std::vector<InstructionNode*>* GetSSAUses(int predecessor_pos) {
- return definition_edges_.at(predecessor_pos);
- }
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- private:
- int register_no_;
- // This vector has one entry for each predecessors, each with a single
- // element, storing the id of the instruction that defines the register
- // corresponding to this phi function.
- std::vector<std::vector<InstructionNode*>*> definition_edges_;
-};
-
-// This class corresponds to a basic block in traditional compiler IRs.
-// The dataflow analysis relies on this class both during execution and
-// for storing its results.
-class Region : public SeaNode {
- public:
- explicit Region():
- SeaNode(), successors_(), predecessors_(), reaching_defs_size_(0),
- rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() {
- string_id_ = "cluster_" + string_id_;
- }
- // Adds @instruction as an instruction node child in the current region.
- void AddChild(sea_ir::InstructionNode* instruction);
- // Returns the last instruction node child of the current region.
- // This child has the CFG successors pointing to the new regions.
- SeaNode* GetLastChild() const;
- // Returns all the child instructions of this region, in program order.
- std::vector<InstructionNode*>* GetInstructions() {
- return &instructions_;
- }
-
- // Computes Downward Exposed Definitions for the current node.
- void ComputeDownExposedDefs();
- const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
- // Performs one iteration of the reaching definitions algorithm
- // and returns true if the reaching definitions set changed.
- bool UpdateReachingDefs();
- // Returns the set of reaching definitions for the current region.
- std::map<int, std::set<sea_ir::InstructionNode*>* >* GetReachingDefs();
-
- void SetRPO(int rpo) {
- rpo_number_ = rpo;
- }
-
- int GetRPO() {
- return rpo_number_;
- }
-
- void SetIDominator(Region* dom) {
- idom_ = dom;
- }
-
- Region* GetIDominator() const {
- return idom_;
- }
-
- void AddToIDominatedSet(Region* dominated) {
- idominated_set_.insert(dominated);
- }
-
- const std::set<Region*>* GetIDominatedSet() {
- return &idominated_set_;
- }
- // Adds @df_reg to the dominance frontier of the current region.
- void AddToDominanceFrontier(Region* df_reg) {
- df_.insert(df_reg);
- }
- // Returns the dominance frontier of the current region.
- // Preconditions: SeaGraph.ComputeDominanceFrontier()
- std::set<Region*>* GetDominanceFrontier() {
- return &df_;
- }
- // Returns true if the region contains a phi function for @reg_no.
- bool ContainsPhiFor(int reg_no) {
- return (phi_set_.end() != phi_set_.find(reg_no));
- }
- // Returns the phi-functions from the region.
- std::vector<PhiInstructionNode*>* GetPhiNodes() {
- return &phi_instructions_;
- }
- // Adds a phi-function for @reg_no to this region.
- // Note: The insertion order does not matter, as phi-functions
- // are conceptually executed at the same time.
- bool InsertPhiFor(int reg_no);
- // Sets the phi-function uses to be as defined in @scoped_table for predecessor @@predecessor.
- void SetPhiDefinitionsForUses(const utils::ScopedHashtable<int, InstructionNode*>* scoped_table,
- Region* predecessor);
-
- void Accept(IRVisitor* v) {
- v->Visit(this);
- v->Traverse(this);
- }
-
- void AddSuccessor(Region* successor) {
- DCHECK(successor) << "Tried to add NULL successor to SEA node.";
- successors_.push_back(successor);
- return;
- }
- void AddPredecessor(Region* predecessor) {
- DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
- predecessors_.push_back(predecessor);
- }
-
- std::vector<sea_ir::Region*>* GetSuccessors() {
- return &successors_;
- }
- std::vector<sea_ir::Region*>* GetPredecessors() {
- return &predecessors_;
- }
-
- private:
- std::vector<sea_ir::Region*> successors_; // CFG successor nodes (regions)
- std::vector<sea_ir::Region*> predecessors_; // CFG predecessor nodes (instructions/regions)
- std::vector<sea_ir::InstructionNode*> instructions_;
- std::map<int, sea_ir::InstructionNode*> de_defs_;
- std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs_;
- int reaching_defs_size_;
- int rpo_number_; // reverse postorder number of the region
- // Immediate dominator node.
- Region* idom_;
- // The set of nodes immediately dominated by the region.
- std::set<Region*> idominated_set_;
- // Records the dominance frontier.
- std::set<Region*> df_;
- // Records the set of register numbers that have phi nodes in this region.
- std::set<int> phi_set_;
- std::vector<PhiInstructionNode*> phi_instructions_;
-};
-
-// A SeaGraph instance corresponds to a source code function.
-// Its main point is to encapsulate the SEA IR representation of it
-// and acts as starting point for visitors (ex: during code generation).
-class SeaGraph: IVisitable {
- public:
- static SeaGraph* GetGraph(const art::DexFile&);
-
- CodeGenData* CompileMethod(const std::string& function_name,
- const art::DexFile::CodeItem* code_item, uint16_t class_def_idx,
- uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file);
- // Returns all regions corresponding to this SeaGraph.
- std::vector<Region*>* GetRegions() {
- return &regions_;
- }
- // Recursively computes the reverse postorder value for @crt_bb and successors.
- static void ComputeRPO(Region* crt_bb, int& crt_rpo);
- // Returns the "lowest common ancestor" of @i and @j in the dominator tree.
- static Region* Intersect(Region* i, Region* j);
- // Returns the vector of parameters of the function.
- std::vector<SignatureNode*>* GetParameterNodes() {
- return &parameters_;
- }
-
- const art::DexFile* GetDexFile() const {
- return &dex_file_;
- }
-
- virtual void Accept(IRVisitor* visitor) {
- visitor->Initialize(this);
- visitor->Visit(this);
- visitor->Traverse(this);
- }
-
- TypeInference* ti_;
- uint16_t class_def_idx_;
- uint32_t method_idx_;
- uint32_t method_access_flags_;
-
- protected:
- explicit SeaGraph(const art::DexFile& df);
- virtual ~SeaGraph() { }
-
- private:
- FRIEND_TEST(RegionsTest, Basics);
- // Registers @childReg as a region belonging to the SeaGraph instance.
- void AddRegion(Region* childReg);
- // Returns new region and registers it with the SeaGraph instance.
- Region* GetNewRegion();
- // Adds a (formal) parameter node to the vector of parameters of the function.
- void AddParameterNode(SignatureNode* parameterNode) {
- parameters_.push_back(parameterNode);
- }
- // Adds a CFG edge from @src node to @dst node.
- void AddEdge(Region* src, Region* dst) const;
- // Builds the non-SSA sea-ir representation of the function @code_item from @dex_file
- // with class id @class_def_idx and method id @method_idx.
- void BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
- const art::DexFile& dex_file, uint16_t class_def_idx,
- uint32_t method_idx, uint32_t method_access_flags);
- // Computes immediate dominators for each region.
- // Precondition: ComputeMethodSeaGraph()
- void ComputeIDominators();
- // Computes Downward Exposed Definitions for all regions in the graph.
- void ComputeDownExposedDefs();
- // Computes the reaching definitions set following the equations from
- // Cooper & Torczon, "Engineering a Compiler", second edition, page 491.
- // Precondition: ComputeDEDefs()
- void ComputeReachingDefs();
- // Computes the reverse-postorder numbering for the region nodes.
- // Precondition: ComputeDEDefs()
- void ComputeRPO();
- // Computes the dominance frontier for all regions in the graph,
- // following the algorithm from
- // Cooper & Torczon, "Engineering a Compiler", second edition, page 499.
- // Precondition: ComputeIDominators()
- void ComputeDominanceFrontier();
- // Converts the IR to semi-pruned SSA form.
- void ConvertToSSA();
- // Performs the renaming phase of the SSA transformation during ConvertToSSA() execution.
- void RenameAsSSA();
- // Identifies the definitions corresponding to uses for region @node
- // by using the scoped hashtable of names @ scoped_table.
- void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
- // Generate LLVM IR for the method.
- // Precondition: ConvertToSSA().
- CodeGenData* GenerateLLVM(const std::string& function_name, const art::DexFile& dex_file);
- // Inserts one SignatureNode for each argument of the function in
- void InsertSignatureNodes(const art::DexFile::CodeItem* code_item, Region* r);
-
- static SeaGraph graph_;
- std::vector<Region*> regions_;
- std::vector<SignatureNode*> parameters_;
- const art::DexFile& dex_file_;
- const art::DexFile::CodeItem* code_item_;
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_IR_SEA_H_
diff --git a/compiler/sea_ir/ir/sea_node.h b/compiler/sea_ir/ir/sea_node.h
deleted file mode 100644
index 4dab5cba83..0000000000
--- a/compiler/sea_ir/ir/sea_node.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
-#define ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
-
-#include "base/stringprintf.h"
-
-namespace sea_ir {
-class Region;
-class IRVisitor;
-
-class IVisitable {
- public:
- virtual void Accept(IRVisitor* visitor) = 0;
- virtual ~IVisitable() {}
-};
-
-// This abstract class provides the essential services that
-// we want each SEA IR element to have.
-// At the moment, these are:
-// - an id and corresponding string representation.
-// - a .dot graph language representation for .dot output.
-//
-// Note that SEA IR nodes could also be Regions, Projects
-// which are not instructions.
-class SeaNode: public IVisitable {
- public:
- explicit SeaNode():id_(GetNewId()), string_id_() {
- string_id_ = art::StringPrintf("%d", id_);
- }
-
- // Adds CFG predecessors and successors to each block.
- void AddSuccessor(Region* successor);
- void AddPredecessor(Region* predecesor);
-
- // Returns the id of the current block as string
- const std::string& StringId() const {
- return string_id_;
- }
- // Returns the id of this node as int. The id is supposed to be unique among
- // all instances of all subclasses of this class.
- int Id() const {
- return id_;
- }
-
- virtual ~SeaNode() { }
-
- protected:
- static int GetNewId() {
- return current_max_node_id_++;
- }
-
- const int id_;
- std::string string_id_;
-
- private:
- static int current_max_node_id_;
- // Creating new instances of sea node objects should not be done through copy or assignment
- // operators because that would lead to duplication of their unique ids.
- DISALLOW_COPY_AND_ASSIGN(SeaNode);
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
diff --git a/compiler/sea_ir/ir/visitor.h b/compiler/sea_ir/ir/visitor.h
deleted file mode 100644
index cc7b5d153f..0000000000
--- a/compiler/sea_ir/ir/visitor.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_IR_VISITOR_H_
-#define ART_COMPILER_SEA_IR_IR_VISITOR_H_
-
-namespace sea_ir {
-
-class SeaGraph;
-class Region;
-class InstructionNode;
-class PhiInstructionNode;
-class SignatureNode;
-class UnnamedConstInstructionNode;
-class ConstInstructionNode;
-class ReturnInstructionNode;
-class IfNeInstructionNode;
-class AddIntLit8InstructionNode;
-class MoveResultInstructionNode;
-class InvokeStaticInstructionNode;
-class AddIntInstructionNode;
-class AddIntLitInstructionNode;
-class GotoInstructionNode;
-class IfEqzInstructionNode;
-
-
-
-
-class IRVisitor {
- public:
- explicit IRVisitor(): ordered_regions_() { }
- virtual void Initialize(SeaGraph* graph) = 0;
- virtual void Visit(SeaGraph* graph) = 0;
- virtual void Visit(Region* region) = 0;
- virtual void Visit(PhiInstructionNode* region) = 0;
- virtual void Visit(SignatureNode* region) = 0;
-
- virtual void Visit(InstructionNode* region) = 0;
- virtual void Visit(ConstInstructionNode* instruction) = 0;
- virtual void Visit(UnnamedConstInstructionNode* instruction) = 0;
- virtual void Visit(ReturnInstructionNode* instruction) = 0;
- virtual void Visit(IfNeInstructionNode* instruction) = 0;
- virtual void Visit(MoveResultInstructionNode* instruction) = 0;
- virtual void Visit(InvokeStaticInstructionNode* instruction) = 0;
- virtual void Visit(AddIntInstructionNode* instruction) = 0;
- virtual void Visit(GotoInstructionNode* instruction) = 0;
- virtual void Visit(IfEqzInstructionNode* instruction) = 0;
-
- // Note: This flavor of visitor separates the traversal functions from the actual visiting part
- // so that the Visitor subclasses don't duplicate code and can't get the traversal wrong.
- // The disadvantage is the increased number of functions (and calls).
- virtual void Traverse(SeaGraph* graph);
- virtual void Traverse(Region* region);
- // The following functions are meant to be empty and not pure virtual,
- // because the parameter classes have no children to traverse.
- virtual void Traverse(InstructionNode* region) { }
- virtual void Traverse(ConstInstructionNode* instruction) { }
- virtual void Traverse(ReturnInstructionNode* instruction) { }
- virtual void Traverse(IfNeInstructionNode* instruction) { }
- virtual void Traverse(AddIntLit8InstructionNode* instruction) { }
- virtual void Traverse(MoveResultInstructionNode* instruction) { }
- virtual void Traverse(InvokeStaticInstructionNode* instruction) { }
- virtual void Traverse(AddIntInstructionNode* instruction) { }
- virtual void Traverse(GotoInstructionNode* instruction) { }
- virtual void Traverse(IfEqzInstructionNode* instruction) { }
- virtual void Traverse(PhiInstructionNode* phi) { }
- virtual void Traverse(SignatureNode* sig) { }
- virtual ~IRVisitor() { }
-
- protected:
- std::vector<Region*> ordered_regions_;
-};
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_IR_VISITOR_H_
diff --git a/compiler/sea_ir/types/type_data_test.cc b/compiler/sea_ir/types/type_data_test.cc
deleted file mode 100644
index 42c6973c61..0000000000
--- a/compiler/sea_ir/types/type_data_test.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_compiler_test.h"
-#include "sea_ir/types/types.h"
-
-namespace sea_ir {
-
-class TypeDataTest : public art::CommonCompilerTest {};
-
-TEST_F(TypeDataTest, Basics) {
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- int first_instruction_id = 1;
- int second_instruction_id = 3;
- EXPECT_TRUE(NULL == td.FindTypeOf(first_instruction_id));
- const Type* int_type = &type_cache.Integer();
- const Type* byte_type = &type_cache.Byte();
- td.SetTypeOf(first_instruction_id, int_type);
- EXPECT_TRUE(int_type == td.FindTypeOf(first_instruction_id));
- EXPECT_TRUE(NULL == td.FindTypeOf(second_instruction_id));
- td.SetTypeOf(second_instruction_id, byte_type);
- EXPECT_TRUE(int_type == td.FindTypeOf(first_instruction_id));
- EXPECT_TRUE(byte_type == td.FindTypeOf(second_instruction_id));
-}
-
-} // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference.cc b/compiler/sea_ir/types/type_inference.cc
deleted file mode 100644
index 173198782e..0000000000
--- a/compiler/sea_ir/types/type_inference.cc
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "scoped_thread_state_change.h"
-#include "sea_ir/types/type_inference.h"
-#include "sea_ir/types/type_inference_visitor.h"
-#include "sea_ir/ir/sea.h"
-
-namespace sea_ir {
-
-bool TypeInference::IsPrimitiveDescriptor(char descriptor) {
- switch (descriptor) {
- case 'I':
- case 'C':
- case 'S':
- case 'B':
- case 'Z':
- case 'F':
- case 'D':
- case 'J':
- return true;
- default:
- return false;
- }
-}
-
-FunctionTypeInfo::FunctionTypeInfo(const SeaGraph* graph, art::verifier::RegTypeCache* types)
- : dex_file_(graph->GetDexFile()), dex_method_idx_(graph->method_idx_), type_cache_(types),
- method_access_flags_(graph->method_access_flags_) {
- const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
- declaring_class_ = &(type_cache_->FromDescriptor(NULL, descriptor, false));
-}
-
-FunctionTypeInfo::FunctionTypeInfo(const SeaGraph* graph, InstructionNode* inst,
- art::verifier::RegTypeCache* types): dex_file_(graph->GetDexFile()),
- dex_method_idx_(inst->GetInstruction()->VRegB_35c()), type_cache_(types),
- method_access_flags_(0) {
- // TODO: Test that GetDeclaredArgumentTypes() works correctly when using this constructor.
- const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
- declaring_class_ = &(type_cache_->FromDescriptor(NULL, descriptor, false));
-}
-
-const Type* FunctionTypeInfo::GetReturnValueType() {
- const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
- art::ScopedObjectAccess soa(art::Thread::Current());
- const Type& return_type = type_cache_->FromDescriptor(NULL, descriptor, false);
- return &return_type;
-}
-
-
-
-std::vector<const Type*> FunctionTypeInfo::GetDeclaredArgumentTypes() {
- art::ScopedObjectAccess soa(art::Thread::Current());
- std::vector<const Type*> argument_types;
- // TODO: The additional (fake) Method parameter is added on the first position,
- // but is represented as integer because we don't support pointers yet.
- argument_types.push_back(&(type_cache_->Integer()));
- // Include the "this" pointer.
- size_t cur_arg = 0;
- if (!IsStatic()) {
- // If this is a constructor for a class other than java.lang.Object, mark the first ("this")
- // argument as uninitialized. This restricts field access until the superclass constructor is
- // called.
- const art::verifier::RegType& declaring_class = GetDeclaringClass();
- if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
- argument_types.push_back(&(type_cache_->UninitializedThisArgument(declaring_class)));
- } else {
- argument_types.push_back(&declaring_class);
- }
- cur_arg++;
- }
- // Include the types of the parameters in the Java method signature.
- const art::DexFile::ProtoId& proto_id =
- dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
- art::DexFileParameterIterator iterator(*dex_file_, proto_id);
-
- for (; iterator.HasNext(); iterator.Next()) {
- const char* descriptor = iterator.GetDescriptor();
- if (descriptor == NULL) {
- LOG(FATAL) << "Error: Encountered null type descriptor for function argument.";
- }
- switch (descriptor[0]) {
- case 'L':
- case '[':
- // We assume that reference arguments are initialized. The only way it could be otherwise
- // (assuming the caller was verified) is if the current method is <init>, but in that case
- // it's effectively considered initialized the instant we reach here (in the sense that we
- // can return without doing anything or call virtual methods).
- {
- const Type& reg_type = type_cache_->FromDescriptor(NULL, descriptor, false);
- argument_types.push_back(&reg_type);
- }
- break;
- case 'Z':
- argument_types.push_back(&type_cache_->Boolean());
- break;
- case 'C':
- argument_types.push_back(&type_cache_->Char());
- break;
- case 'B':
- argument_types.push_back(&type_cache_->Byte());
- break;
- case 'I':
- argument_types.push_back(&type_cache_->Integer());
- break;
- case 'S':
- argument_types.push_back(&type_cache_->Short());
- break;
- case 'F':
- argument_types.push_back(&type_cache_->Float());
- break;
- case 'J':
- case 'D': {
- // TODO: Figure out strategy for two-register operands (double, long)
- LOG(FATAL) << "Error: Type inference for 64-bit variables has not been implemented.";
- break;
- }
- default:
- LOG(FATAL) << "Error: Unexpected signature encountered during type inference.";
- }
- cur_arg++;
- }
- return argument_types;
-}
-
-// TODO: Lock is only used for dumping types (during development). Remove this for performance.
-void TypeInference::ComputeTypes(SeaGraph* graph) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::vector<Region*>* regions = graph->GetRegions();
- std::list<InstructionNode*> worklist;
- // Fill the work-list with all instructions.
- for (std::vector<Region*>::const_iterator region_it = regions->begin();
- region_it != regions->end(); region_it++) {
- std::vector<PhiInstructionNode*>* phi_instructions = (*region_it)->GetPhiNodes();
- std::copy(phi_instructions->begin(), phi_instructions->end(), std::back_inserter(worklist));
- std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
- std::copy(instructions->begin(), instructions->end(), std::back_inserter(worklist));
- }
- TypeInferenceVisitor tiv(graph, &type_data_, type_cache_);
- // Record return type of the function.
- graph->Accept(&tiv);
- const Type* new_type = tiv.GetType();
- type_data_.SetTypeOf(-1, new_type); // TODO: Record this info in a way that
- // does not need magic constants.
- // Make SeaGraph a SeaNode?
-
- // Sparse (SSA) fixed-point algorithm that processes each instruction in the work-list,
- // adding consumers of instructions whose result changed type back into the work-list.
- // Note: According to [1] list iterators should not be invalidated on insertion,
- // which simplifies the implementation; not 100% sure other STL implementations
- // maintain this invariant, but they should.
- // [1] http://www.sgi.com/tech/stl/List.html
- // TODO: Making this conditional (as in sparse conditional constant propagation) would be good.
- // TODO: Remove elements as I go.
- for (std::list<InstructionNode*>::const_iterator instruction_it = worklist.begin();
- instruction_it != worklist.end(); instruction_it++) {
- (*instruction_it)->Accept(&tiv);
- const Type* old_type = type_data_.FindTypeOf((*instruction_it)->Id());
- const Type* new_type = tiv.GetType();
- bool type_changed = (old_type != new_type);
- if (type_changed) {
- type_data_.SetTypeOf((*instruction_it)->Id(), new_type);
- // Add SSA consumers of the current instruction to the work-list.
- std::vector<InstructionNode*>* consumers = (*instruction_it)->GetSSAConsumers();
- for (std::vector<InstructionNode*>::iterator consumer = consumers->begin();
- consumer != consumers->end(); consumer++) {
- worklist.push_back(*consumer);
- }
- }
- }
-}
-} // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference.h b/compiler/sea_ir/types/type_inference.h
deleted file mode 100644
index 7a178b29b8..0000000000
--- a/compiler/sea_ir/types/type_inference.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
-#define ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
-
-#include "safe_map.h"
-#include "dex_file-inl.h"
-#include "sea_ir/types/types.h"
-
-namespace sea_ir {
-
-class SeaGraph;
-class InstructionNode;
-
-// The type inference in SEA IR is different from the verifier in that it is concerned
-// with a rich type hierarchy (TODO) usable in optimization and does not perform
-// precise verification (which is the job of the verifier).
-class TypeInference {
- public:
- TypeInference() : type_cache_(new art::verifier::RegTypeCache(false)) {
- }
-
- // Computes the types for the method with SEA IR representation provided by @graph.
- void ComputeTypes(SeaGraph* graph);
-
- art::SafeMap<int, const Type*>* GetTypeMap() {
- return type_data_.GetTypeMap();
- }
- // Returns true if @descriptor corresponds to a primitive type.
- static bool IsPrimitiveDescriptor(char descriptor);
- TypeData type_data_; // TODO: Make private, add accessor and not publish a SafeMap above.
- art::verifier::RegTypeCache* const type_cache_; // TODO: Make private.
-};
-
-// Stores information about the exact type of a function.
-class FunctionTypeInfo {
- public:
- // Finds method information about the method encoded by a SEA IR graph.
- // @graph provides the input method SEA IR representation.
- // @types provides the input cache of types from which the
- // parameter types of the function are found.
- FunctionTypeInfo(const SeaGraph* graph, art::verifier::RegTypeCache* types);
- // Finds method information about the method encoded by
- // an invocation instruction in a SEA IR graph.
- // @graph provides the input method SEA IR representation.
- // @inst is an invocation instruction for the desired method.
- // @types provides the input cache of types from which the
- // parameter types of the function are found.
- FunctionTypeInfo(const SeaGraph* graph, InstructionNode* inst,
- art::verifier::RegTypeCache* types);
- // Returns the ordered vector of types corresponding to the function arguments.
- std::vector<const Type*> GetDeclaredArgumentTypes();
- // Returns the declared return value type.
- const Type* GetReturnValueType();
- // Returns the type corresponding to the class that declared the method.
- const Type& GetDeclaringClass() {
- return *declaring_class_;
- }
-
- bool IsConstructor() const {
- return (method_access_flags_ & kAccConstructor) != 0;
- }
-
- bool IsStatic() const {
- return (method_access_flags_ & kAccStatic) != 0;
- }
-
- protected:
- const Type* declaring_class_;
- const art::DexFile* dex_file_;
- const uint32_t dex_method_idx_;
- art::verifier::RegTypeCache* type_cache_;
- const uint32_t method_access_flags_; // Method's access flags.
-};
-} // namespace sea_ir
-
-#endif // ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
diff --git a/compiler/sea_ir/types/type_inference_visitor.cc b/compiler/sea_ir/types/type_inference_visitor.cc
deleted file mode 100644
index 27bb5d84a9..0000000000
--- a/compiler/sea_ir/types/type_inference_visitor.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "scoped_thread_state_change.h"
-#include "sea_ir/types/type_inference_visitor.h"
-#include "sea_ir/types/type_inference.h"
-#include "sea_ir/ir/sea.h"
-
-namespace sea_ir {
-
-void TypeInferenceVisitor::Visit(SeaGraph* graph) {
- FunctionTypeInfo fti(graph_, type_cache_);
- const Type* return_type = fti.GetReturnValueType();
- crt_type_.push_back(return_type);
-}
-
-void TypeInferenceVisitor::Visit(SignatureNode* parameter) {
- FunctionTypeInfo fti(graph_, type_cache_);
- std::vector<const Type*> arguments = fti.GetDeclaredArgumentTypes();
- DCHECK_LT(parameter->GetPositionInSignature(), arguments.size())
- << "Signature node position not present in signature.";
- crt_type_.push_back(arguments.at(parameter->GetPositionInSignature()));
-}
-
-void TypeInferenceVisitor::Visit(UnnamedConstInstructionNode* instruction) {
- crt_type_.push_back(&type_cache_->Integer());
-}
-
-void TypeInferenceVisitor::Visit(PhiInstructionNode* instruction) {
- std::vector<const Type*> types_to_merge = GetOperandTypes(instruction);
- const Type* result_type = MergeTypes(types_to_merge);
- crt_type_.push_back(result_type);
-}
-
-void TypeInferenceVisitor::Visit(AddIntInstructionNode* instruction) {
- std::vector<const Type*> operand_types = GetOperandTypes(instruction);
- for (std::vector<const Type*>::const_iterator cit = operand_types.begin();
- cit != operand_types.end(); cit++) {
- if (*cit != NULL) {
- DCHECK((*cit)->IsInteger());
- }
- }
- crt_type_.push_back(&type_cache_->Integer());
-}
-
-void TypeInferenceVisitor::Visit(MoveResultInstructionNode* instruction) {
- std::vector<const Type*> operand_types = GetOperandTypes(instruction);
- const Type* operand_type = operand_types.at(0);
- crt_type_.push_back(operand_type);
-}
-
-void TypeInferenceVisitor::Visit(InvokeStaticInstructionNode* instruction) {
- FunctionTypeInfo fti(graph_, instruction, type_cache_);
- const Type* result_type = fti.GetReturnValueType();
- crt_type_.push_back(result_type);
-}
-
-std::vector<const Type*> TypeInferenceVisitor::GetOperandTypes(
- InstructionNode* instruction) const {
- std::vector<InstructionNode*> sources = instruction->GetSSAProducers();
- std::vector<const Type*> types_to_merge;
- for (std::vector<InstructionNode*>::const_iterator cit = sources.begin(); cit != sources.end();
- cit++) {
- const Type* source_type = type_data_->FindTypeOf((*cit)->Id());
- if (source_type != NULL) {
- types_to_merge.push_back(source_type);
- }
- }
- return types_to_merge;
-}
-
-const Type* TypeInferenceVisitor::MergeTypes(std::vector<const Type*>& types) const {
- const Type* type = NULL;
- if (types.size() > 0) {
- type = *(types.begin());
- if (types.size() > 1) {
- for (std::vector<const Type*>::const_iterator cit = types.begin();
- cit != types.end(); cit++) {
- if (!type->Equals(**cit)) {
- type = MergeTypes(type, *cit);
- }
- }
- }
- }
- return type;
-}
-
-const Type* TypeInferenceVisitor::MergeTypes(const Type* t1, const Type* t2) const {
- DCHECK(t2 != NULL);
- DCHECK(t1 != NULL);
- art::ScopedObjectAccess soa(art::Thread::Current());
- const Type* result = &(t1->Merge(*t2, type_cache_));
- return result;
-}
-
-} // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference_visitor.h b/compiler/sea_ir/types/type_inference_visitor.h
deleted file mode 100644
index d7151518b0..0000000000
--- a/compiler/sea_ir/types/type_inference_visitor.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
-#define ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
-
-
-#include "dex_file-inl.h"
-#include "sea_ir/ir/visitor.h"
-#include "sea_ir/types/types.h"
-
-namespace sea_ir {
-
-// The TypeInferenceVisitor visits each instruction and computes its type taking into account
-// the current type of the operands. The type is stored in the visitor.
-// We may be better off by using a separate visitor type hierarchy that has return values
-// or that passes data as parameters, than to use fields to store information that should
-// in fact be returned after visiting each element. Ideally, I would prefer to use templates
-// to specify the returned value type, but I am not aware of a possible implementation
-// that does not horribly duplicate the visitor infrastructure code (version 1: no return value,
-// version 2: with template return value).
-class TypeInferenceVisitor: public IRVisitor {
- public:
- TypeInferenceVisitor(SeaGraph* graph, TypeData* type_data,
- art::verifier::RegTypeCache* types):
- graph_(graph), type_data_(type_data), type_cache_(types), crt_type_() {
- }
- // There are no type related actions to be performed on these classes.
- void Initialize(SeaGraph* graph) { }
- void Visit(SeaGraph* graph);
- void Visit(Region* region) { }
-
- void Visit(PhiInstructionNode* instruction);
- void Visit(SignatureNode* parameter);
- void Visit(InstructionNode* instruction) { }
- void Visit(UnnamedConstInstructionNode* instruction);
- void Visit(ConstInstructionNode* instruction) { }
- void Visit(ReturnInstructionNode* instruction) { }
- void Visit(IfNeInstructionNode* instruction) { }
- void Visit(MoveResultInstructionNode* instruction);
- void Visit(InvokeStaticInstructionNode* instruction);
- void Visit(AddIntInstructionNode* instruction);
- void Visit(GotoInstructionNode* instruction) { }
- void Visit(IfEqzInstructionNode* instruction) { }
-
- const Type* MergeTypes(std::vector<const Type*>& types) const;
- const Type* MergeTypes(const Type* t1, const Type* t2) const;
- std::vector<const Type*> GetOperandTypes(InstructionNode* instruction) const;
- const Type* GetType() {
- // TODO: Currently multiple defined types are not supported.
- if (!crt_type_.empty()) {
- const Type* single_type = crt_type_.at(0);
- crt_type_.clear();
- return single_type;
- }
- return NULL;
- }
-
- protected:
- const SeaGraph* const graph_;
- TypeData* type_data_;
- art::verifier::RegTypeCache* type_cache_;
- std::vector<const Type*> crt_type_; // Stored temporarily between two calls to Visit.
-};
-
-} // namespace sea_ir
-
-#endif // ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
diff --git a/compiler/sea_ir/types/type_inference_visitor_test.cc b/compiler/sea_ir/types/type_inference_visitor_test.cc
deleted file mode 100644
index ccb699137e..0000000000
--- a/compiler/sea_ir/types/type_inference_visitor_test.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_compiler_test.h"
-#include "sea_ir/types/type_inference_visitor.h"
-#include "sea_ir/ir/sea.h"
-
-namespace sea_ir {
-
-class TestInstructionNode:public InstructionNode {
- public:
- explicit TestInstructionNode(std::vector<InstructionNode*> prods): InstructionNode(NULL),
- producers_(prods) { }
- std::vector<InstructionNode*> GetSSAProducers() {
- return producers_;
- }
- protected:
- std::vector<InstructionNode*> producers_;
-};
-
-class TypeInferenceVisitorTest : public art::CommonCompilerTest {};
-
-TEST_F(TypeInferenceVisitorTest, MergeIntWithByte) {
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- const Type* int_type = &type_cache.Integer();
- const Type* byte_type = &type_cache.Byte();
- const Type* ib_type = tiv.MergeTypes(int_type, byte_type);
- const Type* bi_type = tiv.MergeTypes(byte_type, int_type);
- EXPECT_TRUE(ib_type == int_type);
- EXPECT_TRUE(bi_type == int_type);
-}
-
-TEST_F(TypeInferenceVisitorTest, MergeIntWithShort) {
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- const Type* int_type = &type_cache.Integer();
- const Type* short_type = &type_cache.Short();
- const Type* is_type = tiv.MergeTypes(int_type, short_type);
- const Type* si_type = tiv.MergeTypes(short_type, int_type);
- EXPECT_TRUE(is_type == int_type);
- EXPECT_TRUE(si_type == int_type);
-}
-
-TEST_F(TypeInferenceVisitorTest, MergeMultipleInts) {
- int N = 10; // Number of types to merge.
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- std::vector<const Type*> types;
- for (int i = 0; i < N; i++) {
- const Type* new_type = &type_cache.Integer();
- types.push_back(new_type);
- }
- const Type* merged_type = tiv.MergeTypes(types);
- EXPECT_TRUE(merged_type == &type_cache.Integer());
-}
-
-TEST_F(TypeInferenceVisitorTest, MergeMultipleShorts) {
- int N = 10; // Number of types to merge.
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- std::vector<const Type*> types;
- for (int i = 0; i < N; i++) {
- const Type* new_type = &type_cache.Short();
- types.push_back(new_type);
- }
- const Type* merged_type = tiv.MergeTypes(types);
- EXPECT_TRUE(merged_type == &type_cache.Short());
-}
-
-TEST_F(TypeInferenceVisitorTest, MergeMultipleIntsWithShorts) {
- int N = 10; // Number of types to merge.
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- std::vector<const Type*> types;
- for (int i = 0; i < N; i++) {
- const Type* short_type = &type_cache.Short();
- const Type* int_type = &type_cache.Integer();
- types.push_back(short_type);
- types.push_back(int_type);
- }
- const Type* merged_type = tiv.MergeTypes(types);
- EXPECT_TRUE(merged_type == &type_cache.Integer());
-}
-
-TEST_F(TypeInferenceVisitorTest, GetOperandTypes) {
- int N = 10; // Number of types to merge.
- TypeData td;
- art::verifier::RegTypeCache type_cache(false);
- TypeInferenceVisitor tiv(NULL, &td, &type_cache);
- std::vector<const Type*> types;
- std::vector<InstructionNode*> preds;
- for (int i = 0; i < N; i++) {
- const Type* short_type = &type_cache.Short();
- const Type* int_type = &type_cache.Integer();
- TestInstructionNode* short_inst =
- new TestInstructionNode(std::vector<InstructionNode*>());
- TestInstructionNode* int_inst =
- new TestInstructionNode(std::vector<InstructionNode*>());
- preds.push_back(short_inst);
- preds.push_back(int_inst);
- td.SetTypeOf(short_inst->Id(), short_type);
- td.SetTypeOf(int_inst->Id(), int_type);
- types.push_back(short_type);
- types.push_back(int_type);
- }
- TestInstructionNode* inst_to_test = new TestInstructionNode(preds);
- std::vector<const Type*> result = tiv.GetOperandTypes(inst_to_test);
- EXPECT_TRUE(result.size() == types.size());
- EXPECT_TRUE(true == std::equal(types.begin(), types.begin() + 2, result.begin()));
-}
-
-
-} // namespace sea_ir
diff --git a/compiler/sea_ir/types/types.h b/compiler/sea_ir/types/types.h
deleted file mode 100644
index 64f25243d0..0000000000
--- a/compiler/sea_ir/types/types.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_SEA_IR_TYPES_TYPES_H_
-#define ART_COMPILER_SEA_IR_TYPES_TYPES_H_
-
-#include "safe_map.h"
-#include "verifier/reg_type.h"
-#include "verifier/reg_type_cache.h"
-
-namespace sea_ir {
-
-// TODO: Replace typedef with an actual class implementation when we have more types.
-typedef art::verifier::RegType Type;
-
-// Stores information about the result type of each instruction.
-// Note: Main purpose is to encapsulate the map<instruction id, type*>,
-// so that we can replace the underlying storage at any time.
-class TypeData {
- public:
- art::SafeMap<int, const Type*>* GetTypeMap() {
- return &type_map_;
- }
- // Returns the type associated with instruction with @instruction_id.
- const Type* FindTypeOf(int instruction_id) {
- art::SafeMap<int, const Type*>::const_iterator result_it = type_map_.find(instruction_id);
- if (type_map_.end() != result_it) {
- return result_it->second;
- }
- return NULL;
- }
-
- // Saves the fact that instruction @instruction_id produces a value of type @type.
- void SetTypeOf(int instruction_id, const Type* type) {
- type_map_.Overwrite(instruction_id, type);
- }
-
- private:
- art::SafeMap<int, const Type*> type_map_;
-};
-
-
-
-} // namespace sea_ir
-#endif // ART_COMPILER_SEA_IR_TYPES_TYPES_H_
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index cb07ffae84..385d1340fc 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -40,8 +40,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
__ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
__ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
break;
- case kPortableAbi: // R9 holds Thread*.
- case kQuickAbi: // Fall-through.
+ case kQuickAbi: // R9 holds Thread*.
__ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
}
__ bkpt(0);
@@ -75,8 +74,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
Arm64ManagedRegister::FromXRegister(IP0));
break;
- case kPortableAbi: // X18 holds Thread*.
- case kQuickAbi: // Fall-through.
+ case kQuickAbi: // X18 holds Thread*.
__ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),
Arm64ManagedRegister::FromXRegister(IP0));
@@ -106,8 +104,7 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
__ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
__ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
break;
- case kPortableAbi: // S1 holds Thread*.
- case kQuickAbi: // Fall-through.
+ case kQuickAbi: // S1 holds Thread*.
__ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
}
__ Jr(T9);
diff --git a/compiler/utils/scoped_hashtable.h b/compiler/utils/scoped_hashtable.h
deleted file mode 100644
index bf8dd1fca7..0000000000
--- a/compiler/utils/scoped_hashtable.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
-
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stddef.h>
-#include <map>
-#include <list>
-
-#ifndef ART_COMPILER_UTILS_SCOPED_HASHTABLE_H_
-#define ART_COMPILER_UTILS_SCOPED_HASHTABLE_H_
-
-namespace utils {
-template <typename K, typename V>
-class ScopedHashtable {
- public:
- explicit ScopedHashtable():scopes() {
- }
-
- void OpenScope() {
- scopes.push_front(std::map<K, V>());
- }
-
- // Lookups entry K starting from the current (topmost) scope
- // and returns its value if found or NULL.
- V Lookup(K k) const {
- for (typename std::list<std::map<K, V>>::const_iterator scopes_it = scopes.begin();
- scopes_it != scopes.end(); scopes_it++) {
- typename std::map<K, V>::const_iterator result_it = (*scopes_it).find(k);
- if (result_it != (*scopes_it).end()) {
- return (*result_it).second;
- }
- }
- return NULL;
- }
-
- // Adds a new entry in the current (topmost) scope.
- void Add(K k, V v) {
- scopes.front().erase(k);
- scopes.front().insert(std::pair< K, V >(k, v));
- }
-
- // Removes the topmost scope.
- bool CloseScope() {
- // Added check to uniformly handle undefined behavior
- // when removing scope and the list of scopes is empty.
- if (scopes.size() > 0) {
- scopes.pop_front();
- return true;
- }
- return false;
- }
-
- private:
- std::list<std::map<K, V>> scopes;
-};
-} // namespace utils
-
-#endif // ART_COMPILER_UTILS_SCOPED_HASHTABLE_H_
diff --git a/compiler/utils/scoped_hashtable_test.cc b/compiler/utils/scoped_hashtable_test.cc
deleted file mode 100644
index 1c843ebef1..0000000000
--- a/compiler/utils/scoped_hashtable_test.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "scoped_hashtable.h"
-
-#include "common_runtime_test.h"
-
-using utils::ScopedHashtable;
-
-namespace art {
-
-class Value {
- public:
- explicit Value(int v):value_(v) {}
- int value_;
-};
-
-class ScopedHashtableTest : public testing::Test {};
-
-TEST_F(ScopedHashtableTest, Basics) {
- ScopedHashtable<int, Value*> sht;
- // Check table is empty when no scope is open.
- EXPECT_TRUE(NULL == sht.Lookup(1));
-
- // Check table is empty when scope open.
- sht.OpenScope();
- EXPECT_TRUE(NULL == sht.Lookup(1));
- // Check table is empty after closing scope.
- EXPECT_EQ(sht.CloseScope(), true);
- // Check closing scope on empty table is no-op.
- EXPECT_EQ(sht.CloseScope(), false);
- // Check that find in current scope works.
- sht.OpenScope();
- sht.Add(1, new Value(1));
- EXPECT_EQ(sht.Lookup(1)->value_, 1);
- // Check that updating values in current scope works.
- sht.Add(1, new Value(2));
- EXPECT_EQ(sht.Lookup(1)->value_, 2);
- // Check that find works in previous scope.
- sht.OpenScope();
- EXPECT_EQ(sht.Lookup(1)->value_, 2);
- // Check that shadowing scopes works.
- sht.Add(1, new Value(3));
- EXPECT_EQ(sht.Lookup(1)->value_, 3);
- // Check that having multiple keys work correctly.
- sht.Add(2, new Value(4));
- EXPECT_EQ(sht.Lookup(1)->value_, 3);
- EXPECT_EQ(sht.Lookup(2)->value_, 4);
- // Check that scope removal works corectly.
- sht.CloseScope();
- EXPECT_EQ(sht.Lookup(1)->value_, 2);
- EXPECT_TRUE(NULL == sht.Lookup(2));
-}
-
-} // namespace art
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 00661f4932..2cbfffaea4 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -130,9 +130,6 @@ static void UsageError(const char* fmt, ...) {
UsageError(" --oat-symbols=<file.oat>: specifies the oat output destination with full symbols.");
UsageError(" Example: --oat-symbols=/symbols/system/framework/boot.oat");
UsageError("");
- UsageError(" --bitcode=<file.bc>: specifies the optional bitcode filename.");
- UsageError(" Example: --bitcode=/system/framework/boot.bc");
- UsageError("");
UsageError(" --image=<file.art>: specifies the output image filename.");
UsageError(" Example: --image=/system/framework/boot.art");
UsageError("");
@@ -162,12 +159,10 @@ static void UsageError(const char* fmt, ...) {
UsageError(" --compile-pic: Force indirect use of code, methods, and classes");
UsageError(" Default: disabled");
UsageError("");
- UsageError(" --compiler-backend=(Quick|Optimizing|Portable): select compiler backend");
+ UsageError(" --compiler-backend=(Quick|Optimizing): select compiler backend");
UsageError(" set.");
- UsageError(" Example: --compiler-backend=Portable");
- if (kUsePortableCompiler) {
- UsageError(" Default: Portable");
- } else if (kUseOptimizingCompiler) {
+ UsageError(" Example: --compiler-backend=Optimizing");
+ if (kUseOptimizingCompiler) {
UsageError(" Default: Optimizing");
} else {
UsageError(" Default: Quick");
@@ -221,8 +216,6 @@ static void UsageError(const char* fmt, ...) {
UsageError(" Example: --num-dex-method=%d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError(" Default: %d", CompilerOptions::kDefaultNumDexMethodsThreshold);
UsageError("");
- UsageError(" --host: used with Portable backend to link against host runtime libraries");
- UsageError("");
UsageError(" --dump-timing: display a breakdown of where time was spent");
UsageError("");
UsageError(" --include-patch-information: Include patching information so the generated code");
@@ -356,9 +349,8 @@ class WatchDog {
// Debug builds are slower so they have larger timeouts.
static const unsigned int kSlowdownFactor = kIsDebugBuild ? 5U : 1U;
- static const unsigned int kWatchDogTimeoutSeconds = kUsePortableCompiler ?
- kSlowdownFactor * 30 * 60 : // 30 minutes scaled by kSlowdownFactor (portable).
- kSlowdownFactor * 6 * 60; // 6 minutes scaled by kSlowdownFactor (not-portable).
+ // 6 minutes scaled by kSlowdownFactor.
+ static const unsigned int kWatchDogTimeoutSeconds = kSlowdownFactor * 6 * 60;
bool is_watch_dog_enabled_;
bool shutting_down_;
@@ -404,9 +396,7 @@ static void ParseDouble(const std::string& option, char after_char, double min,
class Dex2Oat FINAL {
public:
explicit Dex2Oat(TimingLogger* timings) :
- compiler_kind_(kUsePortableCompiler
- ? Compiler::kPortable
- : (kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick)),
+ compiler_kind_(kUseOptimizingCompiler ? Compiler::kOptimizing : Compiler::kQuick),
instruction_set_(kRuntimeISA),
// Take the default set of instruction features from the build.
method_inliner_map_(),
@@ -522,8 +512,6 @@ class Dex2Oat FINAL {
}
} else if (option.starts_with("--oat-location=")) {
oat_location_ = option.substr(strlen("--oat-location=")).data();
- } else if (option.starts_with("--bitcode=")) {
- bitcode_filename_ = option.substr(strlen("--bitcode=")).data();
} else if (option.starts_with("--image=")) {
image_filename_ = option.substr(strlen("--image=")).data();
} else if (option.starts_with("--image-classes=")) {
@@ -548,7 +536,7 @@ class Dex2Oat FINAL {
} else if (option.starts_with("--instruction-set=")) {
StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
// StringPiece is not necessarily zero-terminated, so need to make a copy and ensure it.
- std::unique_ptr<char> buf(new char[instruction_set_str.length() + 1]);
+ std::unique_ptr<char[]> buf(new char[instruction_set_str.length() + 1]);
strncpy(buf.get(), instruction_set_str.data(), instruction_set_str.length());
buf.get()[instruction_set_str.length()] = 0;
instruction_set_ = GetInstructionSetFromString(buf.get());
@@ -584,8 +572,6 @@ class Dex2Oat FINAL {
compiler_kind_ = Compiler::kQuick;
} else if (backend_str == "Optimizing") {
compiler_kind_ = Compiler::kOptimizing;
- } else if (backend_str == "Portable") {
- compiler_kind_ = Compiler::kPortable;
} else {
Usage("Unknown compiler backend: %s", backend_str.data());
}
@@ -902,9 +888,6 @@ class Dex2Oat FINAL {
implicit_so_checks,
implicit_suspend_checks,
compile_pic,
- #ifdef ART_SEA_IR_MODE
- true,
- #endif
verbose_methods_.empty() ?
nullptr :
&verbose_methods_,
@@ -1162,8 +1145,6 @@ class Dex2Oat FINAL {
compiler_phases_timings_.get(),
profile_file_));
- driver_->GetCompiler()->SetBitcodeFileName(*driver_, bitcode_filename_);
-
driver_->CompileAll(class_loader, dex_files_, timings_);
}
@@ -1321,7 +1302,7 @@ class Dex2Oat FINAL {
std::unique_ptr<File> in(OS::OpenFileForReading(oat_unstripped_.c_str()));
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_stripped_.c_str()));
size_t buffer_size = 8192;
- std::unique_ptr<uint8_t> buffer(new uint8_t[buffer_size]);
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_size]);
while (true) {
int bytes_read = TEMP_FAILURE_RETRY(read(in->Fd(), buffer.get(), buffer_size));
if (bytes_read <= 0) {
@@ -1330,52 +1311,15 @@ class Dex2Oat FINAL {
bool write_ok = out->WriteFully(buffer.get(), bytes_read);
CHECK(write_ok);
}
- if (kUsePortableCompiler) {
- oat_file_.reset(out.release());
- } else {
- if (out->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_stripped_;
- return false;
- }
+ if (out->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close copied oat file: " << oat_stripped_;
+ return false;
}
VLOG(compiler) << "Oat file copied successfully (stripped): " << oat_stripped_;
}
return true;
}
- // Run the ElfStripper. Currently only relevant for the portable compiler.
- bool Strip() {
- if (kUsePortableCompiler) {
- // Portable includes debug symbols unconditionally. If we are not supposed to create them,
- // strip them now. Quick generates debug symbols only when the flag(s) are set.
- if (!compiler_options_->GetIncludeDebugSymbols()) {
- CHECK(oat_file_.get() != nullptr && oat_file_->IsOpened());
-
- TimingLogger::ScopedTiming t("dex2oat ElfStripper", timings_);
- // Strip unneeded sections for target
- off_t seek_actual = lseek(oat_file_->Fd(), 0, SEEK_SET);
- CHECK_EQ(0, seek_actual);
- std::string error_msg;
- if (!ElfFile::Strip(oat_file_.get(), &error_msg)) {
- LOG(ERROR) << "Failed to strip elf file: " << error_msg;
- oat_file_->Erase();
- return false;
- }
-
- if (!FlushCloseOatFile()) {
- return false;
- }
-
- // We wrote the oat file successfully, and want to keep it.
- VLOG(compiler) << "Oat file written successfully (stripped): " << oat_location_;
- } else {
- VLOG(compiler) << "Oat file written successfully without stripping: " << oat_location_;
- }
- }
-
- return true;
- }
-
bool FlushOatFile() {
if (oat_file_.get() != nullptr) {
TimingLogger::ScopedTiming t2("dex2oat Flush ELF", timings_);
@@ -1622,7 +1566,6 @@ class Dex2Oat FINAL {
std::string oat_location_;
std::string oat_filename_;
int oat_fd_;
- std::string bitcode_filename_;
std::vector<const char*> dex_filenames_;
std::vector<const char*> dex_locations_;
int zip_fd_;
@@ -1709,11 +1652,6 @@ static int CompileImage(Dex2Oat& dex2oat) {
return EXIT_FAILURE;
}
- // Strip, if necessary.
- if (!dex2oat.Strip()) {
- return EXIT_FAILURE;
- }
-
// FlushClose again, as stripping might have re-opened the oat file.
if (!dex2oat.FlushCloseOatFile()) {
return EXIT_FAILURE;
@@ -1754,11 +1692,6 @@ static int CompileApp(Dex2Oat& dex2oat) {
return EXIT_FAILURE;
}
- // Strip, if necessary.
- if (!dex2oat.Strip()) {
- return EXIT_FAILURE;
- }
-
// Flush and close the file.
if (!dex2oat.FlushCloseOatFile()) {
return EXIT_FAILURE;
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 9243b1a86a..52fd736cdb 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -1498,7 +1498,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
} else if ((op2 >> 3) == 6) { // 0110xxx
// Multiply, multiply accumulate, and absolute difference
op1 = (instr >> 20) & 0x7;
- op2 = (instr >> 4) & 0x2;
+ op2 = (instr >> 4) & 0x1;
ArmRegister Ra(instr, 12);
ArmRegister Rn(instr, 16);
ArmRegister Rm(instr, 0);
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index b3af8a676e..1a768c8b86 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -80,8 +80,6 @@ static void DumpReg0(std::ostream& os, uint8_t rex, size_t reg,
}
}
-enum RegFile { GPR, MMX, SSE };
-
static void DumpAnyReg(std::ostream& os, uint8_t rex, size_t reg,
bool byte_operand, uint8_t size_override, RegFile reg_file) {
if (reg_file == GPR) {
@@ -155,6 +153,90 @@ static void DumpSegmentOverride(std::ostream& os, uint8_t segment_prefix) {
}
}
+// Do not inline to avoid Clang stack frame problems. b/18733806
+NO_INLINE
+static std::string DumpCodeHex(const uint8_t* begin, const uint8_t* end) {
+ std::stringstream hex;
+ for (size_t i = 0; begin + i < end; ++i) {
+ hex << StringPrintf("%02X", begin[i]);
+ }
+ return hex.str();
+}
+
+std::string DisassemblerX86::DumpAddress(uint8_t mod, uint8_t rm, uint8_t rex64, uint8_t rex_w,
+ bool no_ops, bool byte_operand, bool byte_second_operand,
+ uint8_t* prefix, bool load, RegFile src_reg_file,
+ RegFile dst_reg_file, const uint8_t** instr,
+ uint32_t* address_bits) {
+ std::ostringstream address;
+ if (mod == 0 && rm == 5) {
+ if (!supports_rex_) { // Absolute address.
+ *address_bits = *reinterpret_cast<const uint32_t*>(*instr);
+ address << StringPrintf("[0x%x]", *address_bits);
+ } else { // 64-bit RIP relative addressing.
+ address << StringPrintf("[RIP + 0x%x]", *reinterpret_cast<const uint32_t*>(*instr));
+ }
+ (*instr) += 4;
+ } else if (rm == 4 && mod != 3) { // SIB
+ uint8_t sib = **instr;
+ (*instr)++;
+ uint8_t scale = (sib >> 6) & 3;
+ uint8_t index = (sib >> 3) & 7;
+ uint8_t base = sib & 7;
+ address << "[";
+ if (base != 5 || mod != 0) {
+ DumpBaseReg(address, rex64, base);
+ if (index != 4) {
+ address << " + ";
+ }
+ }
+ if (index != 4) {
+ DumpIndexReg(address, rex64, index);
+ if (scale != 0) {
+ address << StringPrintf(" * %d", 1 << scale);
+ }
+ }
+ if (mod == 0) {
+ if (base == 5) {
+ if (index != 4) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(*instr));
+ } else {
+ // 64-bit low 32-bit absolute address, redundant absolute address encoding on 32-bit.
+ *address_bits = *reinterpret_cast<const uint32_t*>(*instr);
+ address << StringPrintf("%d", *address_bits);
+ }
+ (*instr) += 4;
+ }
+ } else if (mod == 1) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int8_t*>(*instr));
+ (*instr)++;
+ } else if (mod == 2) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(*instr));
+ (*instr) += 4;
+ }
+ address << "]";
+ } else {
+ if (mod == 3) {
+ if (!no_ops) {
+ DumpRmReg(address, rex_w, rm, byte_operand || byte_second_operand,
+ prefix[2], load ? src_reg_file : dst_reg_file);
+ }
+ } else {
+ address << "[";
+ DumpBaseReg(address, rex64, rm);
+ if (mod == 1) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int8_t*>(*instr));
+ (*instr)++;
+ } else if (mod == 2) {
+ address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(*instr));
+ (*instr) += 4;
+ }
+ address << "]";
+ }
+ }
+ return address.str();
+}
+
size_t DisassemblerX86::DumpInstruction(std::ostream& os, const uint8_t* instr) {
const uint8_t* begin_instr = instr;
bool have_prefixes = true;
@@ -201,7 +283,12 @@ size_t DisassemblerX86::DumpInstruction(std::ostream& os, const uint8_t* instr)
bool reg_is_opcode = false;
size_t immediate_bytes = 0;
size_t branch_bytes = 0;
- std::ostringstream opcode;
+ std::string opcode_tmp; // Storage to keep StringPrintf result alive.
+ const char* opcode0 = ""; // Prefix part.
+ const char* opcode1 = ""; // Main opcode.
+ const char* opcode2 = ""; // Sub-opcode. E.g., jump type.
+ const char* opcode3 = ""; // Mod-rm part.
+ const char* opcode4 = ""; // Suffix part.
bool store = false; // stores to memory (ie rm is on the left)
bool load = false; // loads from memory (ie rm is on the right)
bool byte_operand = false; // true when the opcode is dealing with byte operands
@@ -220,12 +307,12 @@ size_t DisassemblerX86::DumpInstruction(std::ostream& os, const uint8_t* instr)
rm8_r8, rm32_r32, \
r8_rm8, r32_rm32, \
ax8_i8, ax32_i32) \
- case rm8_r8: opcode << #opname; store = true; has_modrm = true; byte_operand = true; break; \
- case rm32_r32: opcode << #opname; store = true; has_modrm = true; break; \
- case r8_rm8: opcode << #opname; load = true; has_modrm = true; byte_operand = true; break; \
- case r32_rm32: opcode << #opname; load = true; has_modrm = true; break; \
- case ax8_i8: opcode << #opname; ax = true; immediate_bytes = 1; byte_operand = true; break; \
- case ax32_i32: opcode << #opname; ax = true; immediate_bytes = 4; break;
+ case rm8_r8: opcode1 = #opname; store = true; has_modrm = true; byte_operand = true; break; \
+ case rm32_r32: opcode1 = #opname; store = true; has_modrm = true; break; \
+ case r8_rm8: opcode1 = #opname; load = true; has_modrm = true; byte_operand = true; break; \
+ case r32_rm32: opcode1 = #opname; load = true; has_modrm = true; break; \
+ case ax8_i8: opcode1 = #opname; ax = true; immediate_bytes = 1; byte_operand = true; break; \
+ case ax32_i32: opcode1 = #opname; ax = true; immediate_bytes = 4; break;
DISASSEMBLER_ENTRY(add,
0x00 /* RegMem8/Reg8 */, 0x01 /* RegMem32/Reg32 */,
@@ -262,65 +349,67 @@ DISASSEMBLER_ENTRY(cmp,
#undef DISASSEMBLER_ENTRY
case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57:
- opcode << "push";
+ opcode1 = "push";
reg_in_opcode = true;
target_specific = true;
break;
case 0x58: case 0x59: case 0x5A: case 0x5B: case 0x5C: case 0x5D: case 0x5E: case 0x5F:
- opcode << "pop";
+ opcode1 = "pop";
reg_in_opcode = true;
target_specific = true;
break;
case 0x63:
if ((rex & REX_W) != 0) {
- opcode << "movsxd";
+ opcode1 = "movsxd";
has_modrm = true;
load = true;
} else {
// In 32-bit mode (!supports_rex_) this is ARPL, with no REX prefix the functionality is the
// same as 'mov' but the use of the instruction is discouraged.
- opcode << StringPrintf("unknown opcode '%02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '%02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
- case 0x68: opcode << "push"; immediate_bytes = 4; break;
- case 0x69: opcode << "imul"; load = true; has_modrm = true; immediate_bytes = 4; break;
- case 0x6A: opcode << "push"; immediate_bytes = 1; break;
- case 0x6B: opcode << "imul"; load = true; has_modrm = true; immediate_bytes = 1; break;
+ case 0x68: opcode1 = "push"; immediate_bytes = 4; break;
+ case 0x69: opcode1 = "imul"; load = true; has_modrm = true; immediate_bytes = 4; break;
+ case 0x6A: opcode1 = "push"; immediate_bytes = 1; break;
+ case 0x6B: opcode1 = "imul"; load = true; has_modrm = true; immediate_bytes = 1; break;
case 0x70: case 0x71: case 0x72: case 0x73: case 0x74: case 0x75: case 0x76: case 0x77:
case 0x78: case 0x79: case 0x7A: case 0x7B: case 0x7C: case 0x7D: case 0x7E: case 0x7F:
static const char* condition_codes[] =
{"o", "no", "b/nae/c", "nb/ae/nc", "z/eq", "nz/ne", "be/na", "nbe/a",
"s", "ns", "p/pe", "np/po", "l/nge", "nl/ge", "le/ng", "nle/g"
};
- opcode << "j" << condition_codes[*instr & 0xF];
+ opcode1 = "j";
+ opcode2 = condition_codes[*instr & 0xF];
branch_bytes = 1;
break;
case 0x86: case 0x87:
- opcode << "xchg";
+ opcode1 = "xchg";
store = true;
has_modrm = true;
byte_operand = (*instr == 0x86);
break;
- case 0x88: opcode << "mov"; store = true; has_modrm = true; byte_operand = true; break;
- case 0x89: opcode << "mov"; store = true; has_modrm = true; break;
- case 0x8A: opcode << "mov"; load = true; has_modrm = true; byte_operand = true; break;
- case 0x8B: opcode << "mov"; load = true; has_modrm = true; break;
+ case 0x88: opcode1 = "mov"; store = true; has_modrm = true; byte_operand = true; break;
+ case 0x89: opcode1 = "mov"; store = true; has_modrm = true; break;
+ case 0x8A: opcode1 = "mov"; load = true; has_modrm = true; byte_operand = true; break;
+ case 0x8B: opcode1 = "mov"; load = true; has_modrm = true; break;
case 0x0F: // 2 byte extended opcode
instr++;
switch (*instr) {
case 0x10: case 0x11:
if (prefix[0] == 0xF2) {
- opcode << "movsd";
+ opcode1 = "movsd";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "movss";
+ opcode1 = "movss";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[2] == 0x66) {
- opcode << "movupd";
+ opcode1 = "movupd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "movups";
+ opcode1 = "movups";
}
has_modrm = true;
src_reg_file = dst_reg_file = SSE;
@@ -329,10 +418,10 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x12: case 0x13:
if (prefix[2] == 0x66) {
- opcode << "movlpd";
+ opcode1 = "movlpd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0) {
- opcode << "movlps";
+ opcode1 = "movlps";
}
has_modrm = true;
src_reg_file = dst_reg_file = SSE;
@@ -341,10 +430,10 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x16: case 0x17:
if (prefix[2] == 0x66) {
- opcode << "movhpd";
+ opcode1 = "movhpd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0) {
- opcode << "movhps";
+ opcode1 = "movhps";
}
has_modrm = true;
src_reg_file = dst_reg_file = SSE;
@@ -353,10 +442,10 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x28: case 0x29:
if (prefix[2] == 0x66) {
- opcode << "movapd";
+ opcode1 = "movapd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0) {
- opcode << "movaps";
+ opcode1 = "movaps";
}
has_modrm = true;
src_reg_file = dst_reg_file = SSE;
@@ -365,16 +454,16 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x2A:
if (prefix[2] == 0x66) {
- opcode << "cvtpi2pd";
+ opcode1 = "cvtpi2pd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "cvtsi2sd";
+ opcode1 = "cvtsi2sd";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "cvtsi2ss";
+ opcode1 = "cvtsi2ss";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "cvtpi2ps";
+ opcode1 = "cvtpi2ps";
}
load = true;
has_modrm = true;
@@ -382,16 +471,16 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x2C:
if (prefix[2] == 0x66) {
- opcode << "cvttpd2pi";
+ opcode1 = "cvttpd2pi";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "cvttsd2si";
+ opcode1 = "cvttsd2si";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "cvttss2si";
+ opcode1 = "cvttss2si";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "cvttps2pi";
+ opcode1 = "cvttps2pi";
}
load = true;
has_modrm = true;
@@ -399,30 +488,30 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x2D:
if (prefix[2] == 0x66) {
- opcode << "cvtpd2pi";
+ opcode1 = "cvtpd2pi";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "cvtsd2si";
+ opcode1 = "cvtsd2si";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "cvtss2si";
+ opcode1 = "cvtss2si";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "cvtps2pi";
+ opcode1 = "cvtps2pi";
}
load = true;
has_modrm = true;
src_reg_file = SSE;
break;
case 0x2E:
- opcode << "u";
+ opcode0 = "u";
FALLTHROUGH_INTENDED;
case 0x2F:
if (prefix[2] == 0x66) {
- opcode << "comisd";
+ opcode1 = "comisd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "comiss";
+ opcode1 = "comiss";
}
has_modrm = true;
load = true;
@@ -433,31 +522,33 @@ DISASSEMBLER_ENTRY(cmp,
if (prefix[2] == 0x66) {
switch (*instr) {
case 0x01:
- opcode << "phaddw";
+ opcode1 = "phaddw";
prefix[2] = 0;
has_modrm = true;
load = true;
src_reg_file = dst_reg_file = SSE;
break;
case 0x02:
- opcode << "phaddd";
+ opcode1 = "phaddd";
prefix[2] = 0;
has_modrm = true;
load = true;
src_reg_file = dst_reg_file = SSE;
break;
case 0x40:
- opcode << "pmulld";
+ opcode1 = "pmulld";
prefix[2] = 0;
has_modrm = true;
load = true;
src_reg_file = dst_reg_file = SSE;
break;
default:
- opcode << StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
} else {
- opcode << StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F 38 %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0x3A: // 3 byte extended opcode
@@ -465,7 +556,7 @@ DISASSEMBLER_ENTRY(cmp,
if (prefix[2] == 0x66) {
switch (*instr) {
case 0x14:
- opcode << "pextrb";
+ opcode1 = "pextrb";
prefix[2] = 0;
has_modrm = true;
store = true;
@@ -473,7 +564,7 @@ DISASSEMBLER_ENTRY(cmp,
immediate_bytes = 1;
break;
case 0x16:
- opcode << "pextrd";
+ opcode1 = "pextrd";
prefix[2] = 0;
has_modrm = true;
store = true;
@@ -481,48 +572,51 @@ DISASSEMBLER_ENTRY(cmp,
immediate_bytes = 1;
break;
default:
- opcode << StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
} else {
- opcode << StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F 3A %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
case 0x48: case 0x49: case 0x4A: case 0x4B: case 0x4C: case 0x4D: case 0x4E: case 0x4F:
- opcode << "cmov" << condition_codes[*instr & 0xF];
+ opcode1 = "cmov";
+ opcode2 = condition_codes[*instr & 0xF];
has_modrm = true;
load = true;
break;
case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57:
case 0x58: case 0x59: case 0x5C: case 0x5D: case 0x5E: case 0x5F: {
switch (*instr) {
- case 0x50: opcode << "movmsk"; break;
- case 0x51: opcode << "sqrt"; break;
- case 0x52: opcode << "rsqrt"; break;
- case 0x53: opcode << "rcp"; break;
- case 0x54: opcode << "and"; break;
- case 0x55: opcode << "andn"; break;
- case 0x56: opcode << "or"; break;
- case 0x57: opcode << "xor"; break;
- case 0x58: opcode << "add"; break;
- case 0x59: opcode << "mul"; break;
- case 0x5C: opcode << "sub"; break;
- case 0x5D: opcode << "min"; break;
- case 0x5E: opcode << "div"; break;
- case 0x5F: opcode << "max"; break;
+ case 0x50: opcode1 = "movmsk"; break;
+ case 0x51: opcode1 = "sqrt"; break;
+ case 0x52: opcode1 = "rsqrt"; break;
+ case 0x53: opcode1 = "rcp"; break;
+ case 0x54: opcode1 = "and"; break;
+ case 0x55: opcode1 = "andn"; break;
+ case 0x56: opcode1 = "or"; break;
+ case 0x57: opcode1 = "xor"; break;
+ case 0x58: opcode1 = "add"; break;
+ case 0x59: opcode1 = "mul"; break;
+ case 0x5C: opcode1 = "sub"; break;
+ case 0x5D: opcode1 = "min"; break;
+ case 0x5E: opcode1 = "div"; break;
+ case 0x5F: opcode1 = "max"; break;
default: LOG(FATAL) << "Unreachable"; UNREACHABLE();
}
if (prefix[2] == 0x66) {
- opcode << "pd";
+ opcode2 = "pd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "sd";
+ opcode2 = "sd";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "ss";
+ opcode2 = "ss";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "ps";
+ opcode2 = "ps";
}
load = true;
has_modrm = true;
@@ -531,16 +625,16 @@ DISASSEMBLER_ENTRY(cmp,
}
case 0x5A:
if (prefix[2] == 0x66) {
- opcode << "cvtpd2ps";
+ opcode1 = "cvtpd2ps";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "cvtsd2ss";
+ opcode1 = "cvtsd2ss";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
- opcode << "cvtss2sd";
+ opcode1 = "cvtss2sd";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "cvtps2pd";
+ opcode1 = "cvtps2pd";
}
load = true;
has_modrm = true;
@@ -548,15 +642,15 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x5B:
if (prefix[2] == 0x66) {
- opcode << "cvtps2dq";
+ opcode1 = "cvtps2dq";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF2) {
- opcode << "bad opcode F2 0F 5B";
+ opcode1 = "bad opcode F2 0F 5B";
} else if (prefix[0] == 0xF3) {
- opcode << "cvttps2dq";
+ opcode1 = "cvttps2dq";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << "cvtdq2ps";
+ opcode1 = "cvtdq2ps";
}
load = true;
has_modrm = true;
@@ -570,10 +664,10 @@ DISASSEMBLER_ENTRY(cmp,
src_reg_file = dst_reg_file = MMX;
}
switch (*instr) {
- case 0x60: opcode << "punpcklbw"; break;
- case 0x61: opcode << "punpcklwd"; break;
- case 0x62: opcode << "punpckldq"; break;
- case 0x6c: opcode << "punpcklqdq"; break;
+ case 0x60: opcode1 = "punpcklbw"; break;
+ case 0x61: opcode1 = "punpcklwd"; break;
+ case 0x62: opcode1 = "punpckldq"; break;
+ case 0x6c: opcode1 = "punpcklqdq"; break;
}
load = true;
has_modrm = true;
@@ -585,43 +679,44 @@ DISASSEMBLER_ENTRY(cmp,
} else {
dst_reg_file = MMX;
}
- opcode << "movd";
+ opcode1 = "movd";
load = true;
has_modrm = true;
break;
case 0x6F:
if (prefix[2] == 0x66) {
src_reg_file = dst_reg_file = SSE;
- opcode << "movdqa";
+ opcode1 = "movdqa";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[0] == 0xF3) {
src_reg_file = dst_reg_file = SSE;
- opcode << "movdqu";
+ opcode1 = "movdqu";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
dst_reg_file = MMX;
- opcode << "movq";
+ opcode1 = "movq";
}
load = true;
has_modrm = true;
break;
case 0x70:
if (prefix[2] == 0x66) {
- opcode << "pshufd";
+ opcode1 = "pshufd";
prefix[2] = 0;
has_modrm = true;
store = true;
src_reg_file = dst_reg_file = SSE;
immediate_bytes = 1;
} else if (prefix[0] == 0xF2) {
- opcode << "pshuflw";
+ opcode1 = "pshuflw";
prefix[0] = 0;
has_modrm = true;
store = true;
src_reg_file = dst_reg_file = SSE;
immediate_bytes = 1;
} else {
- opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0x71:
@@ -674,13 +769,14 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x7C:
if (prefix[0] == 0xF2) {
- opcode << "haddps";
+ opcode1 = "haddps";
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
} else if (prefix[2] == 0x66) {
- opcode << "haddpd";
+ opcode1 = "haddpd";
prefix[2] = 0; // clear prefix now it's served its purpose as part of the opcode
} else {
- opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
break;
}
src_reg_file = dst_reg_file = SSE;
@@ -694,43 +790,45 @@ DISASSEMBLER_ENTRY(cmp,
} else {
src_reg_file = MMX;
}
- opcode << "movd";
+ opcode1 = "movd";
has_modrm = true;
store = true;
break;
case 0x80: case 0x81: case 0x82: case 0x83: case 0x84: case 0x85: case 0x86: case 0x87:
case 0x88: case 0x89: case 0x8A: case 0x8B: case 0x8C: case 0x8D: case 0x8E: case 0x8F:
- opcode << "j" << condition_codes[*instr & 0xF];
+ opcode1 = "j";
+ opcode2 = condition_codes[*instr & 0xF];
branch_bytes = 4;
break;
case 0x90: case 0x91: case 0x92: case 0x93: case 0x94: case 0x95: case 0x96: case 0x97:
case 0x98: case 0x99: case 0x9A: case 0x9B: case 0x9C: case 0x9D: case 0x9E: case 0x9F:
- opcode << "set" << condition_codes[*instr & 0xF];
+ opcode1 = "set";
+ opcode2 = condition_codes[*instr & 0xF];
modrm_opcodes = nullptr;
reg_is_opcode = true;
has_modrm = true;
store = true;
break;
case 0xA4:
- opcode << "shld";
+ opcode1 = "shld";
has_modrm = true;
load = true;
immediate_bytes = 1;
break;
case 0xA5:
- opcode << "shld";
+ opcode1 = "shld";
has_modrm = true;
load = true;
cx = true;
break;
case 0xAC:
- opcode << "shrd";
+ opcode1 = "shrd";
has_modrm = true;
load = true;
immediate_bytes = 1;
break;
case 0xAD:
- opcode << "shrd";
+ opcode1 = "shrd";
has_modrm = true;
load = true;
cx = true;
@@ -778,61 +876,62 @@ DISASSEMBLER_ENTRY(cmp,
}
break;
case 0xAF:
- opcode << "imul";
+ opcode1 = "imul";
has_modrm = true;
load = true;
break;
case 0xB1:
- opcode << "cmpxchg";
+ opcode1 = "cmpxchg";
has_modrm = true;
store = true;
break;
case 0xB6:
- opcode << "movzxb";
+ opcode1 = "movzxb";
has_modrm = true;
load = true;
byte_second_operand = true;
break;
case 0xB7:
- opcode << "movzxw";
+ opcode1 = "movzxw";
has_modrm = true;
load = true;
break;
case 0xBE:
- opcode << "movsxb";
+ opcode1 = "movsxb";
has_modrm = true;
load = true;
byte_second_operand = true;
rex |= (rex == 0 ? 0 : REX_W);
break;
case 0xBF:
- opcode << "movsxw";
+ opcode1 = "movsxw";
has_modrm = true;
load = true;
break;
case 0xC3:
- opcode << "movnti";
+ opcode1 = "movnti";
store = true;
has_modrm = true;
break;
case 0xC5:
if (prefix[2] == 0x66) {
- opcode << "pextrw";
+ opcode1 = "pextrw";
prefix[2] = 0;
has_modrm = true;
store = true;
src_reg_file = SSE;
immediate_bytes = 1;
} else {
- opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0xC6:
if (prefix[2] == 0x66) {
- opcode << "shufpd";
+ opcode1 = "shufpd";
prefix[2] = 0;
} else {
- opcode << "shufps";
+ opcode1 = "shufps";
}
has_modrm = true;
store = true;
@@ -849,7 +948,7 @@ DISASSEMBLER_ENTRY(cmp,
store = true;
break;
case 0xC8: case 0xC9: case 0xCA: case 0xCB: case 0xCC: case 0xCD: case 0xCE: case 0xCF:
- opcode << "bswap";
+ opcode1 = "bswap";
reg_in_opcode = true;
break;
case 0xD4:
@@ -859,7 +958,7 @@ DISASSEMBLER_ENTRY(cmp,
} else {
src_reg_file = dst_reg_file = MMX;
}
- opcode << "paddq";
+ opcode1 = "paddq";
prefix[2] = 0;
has_modrm = true;
load = true;
@@ -871,20 +970,21 @@ DISASSEMBLER_ENTRY(cmp,
} else {
src_reg_file = dst_reg_file = MMX;
}
- opcode << "pand";
+ opcode1 = "pand";
prefix[2] = 0;
has_modrm = true;
load = true;
break;
case 0xD5:
if (prefix[2] == 0x66) {
- opcode << "pmullw";
+ opcode1 = "pmullw";
prefix[2] = 0;
has_modrm = true;
load = true;
src_reg_file = dst_reg_file = SSE;
} else {
- opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0xEB:
@@ -894,7 +994,7 @@ DISASSEMBLER_ENTRY(cmp,
} else {
src_reg_file = dst_reg_file = MMX;
}
- opcode << "por";
+ opcode1 = "por";
prefix[2] = 0;
has_modrm = true;
load = true;
@@ -906,7 +1006,7 @@ DISASSEMBLER_ENTRY(cmp,
} else {
src_reg_file = dst_reg_file = MMX;
}
- opcode << "pxor";
+ opcode1 = "pxor";
prefix[2] = 0;
has_modrm = true;
load = true;
@@ -927,22 +1027,23 @@ DISASSEMBLER_ENTRY(cmp,
src_reg_file = dst_reg_file = MMX;
}
switch (*instr) {
- case 0xF4: opcode << "pmuludq"; break;
- case 0xF6: opcode << "psadbw"; break;
- case 0xF8: opcode << "psubb"; break;
- case 0xF9: opcode << "psubw"; break;
- case 0xFA: opcode << "psubd"; break;
- case 0xFB: opcode << "psubq"; break;
- case 0xFC: opcode << "paddb"; break;
- case 0xFD: opcode << "paddw"; break;
- case 0xFE: opcode << "paddd"; break;
+ case 0xF4: opcode1 = "pmuludq"; break;
+ case 0xF6: opcode1 = "psadbw"; break;
+ case 0xF8: opcode1 = "psubb"; break;
+ case 0xF9: opcode1 = "psubw"; break;
+ case 0xFA: opcode1 = "psubd"; break;
+ case 0xFB: opcode1 = "psubq"; break;
+ case 0xFC: opcode1 = "paddb"; break;
+ case 0xFD: opcode1 = "paddw"; break;
+ case 0xFE: opcode1 = "paddd"; break;
}
prefix[2] = 0;
has_modrm = true;
load = true;
break;
default:
- opcode << StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '0F %02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
break;
}
break;
@@ -956,38 +1057,39 @@ DISASSEMBLER_ENTRY(cmp,
immediate_bytes = *instr == 0x81 ? 4 : 1;
break;
case 0x84: case 0x85:
- opcode << "test";
+ opcode1 = "test";
has_modrm = true;
load = true;
byte_operand = (*instr & 1) == 0;
break;
case 0x8D:
- opcode << "lea";
+ opcode1 = "lea";
has_modrm = true;
load = true;
break;
case 0x8F:
- opcode << "pop";
+ opcode1 = "pop";
has_modrm = true;
reg_is_opcode = true;
store = true;
break;
case 0x99:
- opcode << "cdq";
+ opcode1 = "cdq";
break;
case 0x9B:
if (instr[1] == 0xDF && instr[2] == 0xE0) {
- opcode << "fstsw\tax";
+ opcode1 = "fstsw\tax";
instr += 2;
} else {
- opcode << StringPrintf("unknown opcode '%02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '%02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0xAF:
- opcode << (prefix[2] == 0x66 ? "scasw" : "scasl");
+ opcode1 = (prefix[2] == 0x66 ? "scasw" : "scasl");
break;
case 0xB0: case 0xB1: case 0xB2: case 0xB3: case 0xB4: case 0xB5: case 0xB6: case 0xB7:
- opcode << "mov";
+ opcode1 = "mov";
immediate_bytes = 1;
byte_operand = true;
reg_in_opcode = true;
@@ -995,12 +1097,12 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0xB8: case 0xB9: case 0xBA: case 0xBB: case 0xBC: case 0xBD: case 0xBE: case 0xBF:
if ((rex & REX_W) != 0) {
- opcode << "movabsq";
+ opcode1 = "movabsq";
immediate_bytes = 8;
reg_in_opcode = true;
break;
}
- opcode << "mov";
+ opcode1 = "mov";
immediate_bytes = 4;
reg_in_opcode = true;
break;
@@ -1016,7 +1118,7 @@ DISASSEMBLER_ENTRY(cmp,
cx = (*instr == 0xD2) || (*instr == 0xD3);
byte_operand = (*instr == 0xC0);
break;
- case 0xC3: opcode << "ret"; break;
+ case 0xC3: opcode1 = "ret"; break;
case 0xC6:
static const char* c6_opcodes[] = {"mov", "unknown-c6", "unknown-c6",
"unknown-c6", "unknown-c6", "unknown-c6",
@@ -1038,10 +1140,10 @@ DISASSEMBLER_ENTRY(cmp,
has_modrm = true;
reg_is_opcode = true;
break;
- case 0xCC: opcode << "int 3"; break;
+ case 0xCC: opcode1 = "int 3"; break;
case 0xD9:
if (instr[1] == 0xF8) {
- opcode << "fprem";
+ opcode1 = "fprem";
instr++;
} else {
static const char* d9_opcodes[] = {"flds", "unknown-d9", "fsts", "fstps", "fldenv", "fldcw",
@@ -1054,10 +1156,11 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0xDA:
if (instr[1] == 0xE9) {
- opcode << "fucompp";
+ opcode1 = "fucompp";
instr++;
} else {
- opcode << StringPrintf("unknown opcode '%02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '%02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
}
break;
case 0xDB:
@@ -1087,11 +1190,11 @@ DISASSEMBLER_ENTRY(cmp,
has_modrm = true;
reg_is_opcode = true;
break;
- case 0xE3: opcode << "jecxz"; branch_bytes = 1; break;
- case 0xE8: opcode << "call"; branch_bytes = 4; break;
- case 0xE9: opcode << "jmp"; branch_bytes = 4; break;
- case 0xEB: opcode << "jmp"; branch_bytes = 1; break;
- case 0xF5: opcode << "cmc"; break;
+ case 0xE3: opcode1 = "jecxz"; branch_bytes = 1; break;
+ case 0xE8: opcode1 = "call"; branch_bytes = 4; break;
+ case 0xE9: opcode1 = "jmp"; branch_bytes = 4; break;
+ case 0xEB: opcode1 = "jmp"; branch_bytes = 1; break;
+ case 0xF5: opcode1 = "cmc"; break;
case 0xF6: case 0xF7:
static const char* f7_opcodes[] = {
"test", "unknown-f7", "not", "neg", "mul edx:eax, eax *",
@@ -1120,7 +1223,8 @@ DISASSEMBLER_ENTRY(cmp,
}
break;
default:
- opcode << StringPrintf("unknown opcode '%02X'", *instr);
+ opcode_tmp = StringPrintf("unknown opcode '%02X'", *instr);
+ opcode1 = opcode_tmp.c_str();
break;
}
std::ostringstream args;
@@ -1141,84 +1245,21 @@ DISASSEMBLER_ENTRY(cmp,
uint8_t mod = modrm >> 6;
uint8_t reg_or_opcode = (modrm >> 3) & 7;
uint8_t rm = modrm & 7;
- std::ostringstream address;
- if (mod == 0 && rm == 5) {
- if (!supports_rex_) { // Absolute address.
- address_bits = *reinterpret_cast<const uint32_t*>(instr);
- address << StringPrintf("[0x%x]", address_bits);
- } else { // 64-bit RIP relative addressing.
- address << StringPrintf("[RIP + 0x%x]", *reinterpret_cast<const uint32_t*>(instr));
- }
- instr += 4;
- } else if (rm == 4 && mod != 3) { // SIB
- uint8_t sib = *instr;
- instr++;
- uint8_t scale = (sib >> 6) & 3;
- uint8_t index = (sib >> 3) & 7;
- uint8_t base = sib & 7;
- address << "[";
- if (base != 5 || mod != 0) {
- DumpBaseReg(address, rex64, base);
- if (index != 4) {
- address << " + ";
- }
- }
- if (index != 4) {
- DumpIndexReg(address, rex64, index);
- if (scale != 0) {
- address << StringPrintf(" * %d", 1 << scale);
- }
- }
- if (mod == 0) {
- if (base == 5) {
- if (index != 4) {
- address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(instr));
- } else {
- // 64-bit low 32-bit absolute address, redundant absolute address encoding on 32-bit.
- address_bits = *reinterpret_cast<const uint32_t*>(instr);
- address << StringPrintf("%d", address_bits);
- }
- instr += 4;
- }
- } else if (mod == 1) {
- address << StringPrintf(" + %d", *reinterpret_cast<const int8_t*>(instr));
- instr++;
- } else if (mod == 2) {
- address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(instr));
- instr += 4;
- }
- address << "]";
- } else {
- if (mod == 3) {
- if (!no_ops) {
- DumpRmReg(address, rex_w, rm, byte_operand || byte_second_operand,
- prefix[2], load ? src_reg_file : dst_reg_file);
- }
- } else {
- address << "[";
- DumpBaseReg(address, rex64, rm);
- if (mod == 1) {
- address << StringPrintf(" + %d", *reinterpret_cast<const int8_t*>(instr));
- instr++;
- } else if (mod == 2) {
- address << StringPrintf(" + %d", *reinterpret_cast<const int32_t*>(instr));
- instr += 4;
- }
- address << "]";
- }
- }
+ std::string address = DumpAddress(mod, rm, rex64, rex_w, no_ops, byte_operand,
+ byte_second_operand, prefix, load, src_reg_file, dst_reg_file,
+ &instr, &address_bits);
if (reg_is_opcode && modrm_opcodes != nullptr) {
- opcode << modrm_opcodes[reg_or_opcode];
+ opcode3 = modrm_opcodes[reg_or_opcode];
}
// Add opcode suffixes to indicate size.
if (byte_operand) {
- opcode << 'b';
+ opcode4 = "b";
} else if ((rex & REX_W) != 0) {
- opcode << 'q';
+ opcode4 = "q";
} else if (prefix[2] == 0x66) {
- opcode << 'w';
+ opcode4 = "w";
}
if (load) {
@@ -1227,11 +1268,11 @@ DISASSEMBLER_ENTRY(cmp,
args << ", ";
}
DumpSegmentOverride(args, prefix[1]);
- args << address.str();
+ args << address;
} else {
DCHECK(store);
DumpSegmentOverride(args, prefix[1]);
- args << address.str();
+ args << address;
if (!reg_is_opcode) {
args << ", ";
DumpReg(args, rex, reg_or_opcode, byte_operand, prefix[2], src_reg_file);
@@ -1289,21 +1330,17 @@ DISASSEMBLER_ENTRY(cmp,
args << " ; ";
Thread::DumpThreadOffset<8>(args, address_bits);
}
- std::stringstream hex;
- for (size_t i = 0; begin_instr + i < instr; ++i) {
- hex << StringPrintf("%02X", begin_instr[i]);
- }
- std::stringstream prefixed_opcode;
+ const char* prefix_str;
switch (prefix[0]) {
- case 0xF0: prefixed_opcode << "lock "; break;
- case 0xF2: prefixed_opcode << "repne "; break;
- case 0xF3: prefixed_opcode << "repe "; break;
- case 0: break;
+ case 0xF0: prefix_str = "lock "; break;
+ case 0xF2: prefix_str = "repne "; break;
+ case 0xF3: prefix_str = "repe "; break;
+ case 0: prefix_str = ""; break;
default: LOG(FATAL) << "Unreachable"; UNREACHABLE();
}
- prefixed_opcode << opcode.str();
os << FormatInstructionPointer(begin_instr)
- << StringPrintf(": %22s \t%-7s ", hex.str().c_str(), prefixed_opcode.str().c_str())
+ << StringPrintf(": %22s \t%-7s%s%s%s%s%s ", DumpCodeHex(begin_instr, instr).c_str(),
+ prefix_str, opcode0, opcode1, opcode2, opcode3, opcode4)
<< args.str() << '\n';
return instr - begin_instr;
} // NOLINT(readability/fn_size)
diff --git a/disassembler/disassembler_x86.h b/disassembler/disassembler_x86.h
index f448662f66..71c3e4161c 100644
--- a/disassembler/disassembler_x86.h
+++ b/disassembler/disassembler_x86.h
@@ -22,6 +22,8 @@
namespace art {
namespace x86 {
+enum RegFile { GPR, MMX, SSE };
+
class DisassemblerX86 FINAL : public Disassembler {
public:
DisassemblerX86(DisassemblerOptions* options, bool supports_rex)
@@ -33,6 +35,11 @@ class DisassemblerX86 FINAL : public Disassembler {
private:
size_t DumpInstruction(std::ostream& os, const uint8_t* instr);
+ std::string DumpAddress(uint8_t mod, uint8_t rm, uint8_t rex64, uint8_t rex_w, bool no_ops,
+ bool byte_operand, bool byte_second_operand, uint8_t* prefix, bool load,
+ RegFile src_reg_file, RegFile dst_reg_file, const uint8_t** instr,
+ uint32_t* address_bits);
+
const bool supports_rex_;
DISALLOW_COPY_AND_ASSIGN(DisassemblerX86);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index b048833431..5c5e2b4108 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -442,12 +442,6 @@ class OatDumper {
GetInterpreterToCompiledCodeBridgeOffset);
DUMP_OAT_HEADER_OFFSET("JNI DLSYM LOOKUP",
GetJniDlsymLookupOffset);
- DUMP_OAT_HEADER_OFFSET("PORTABLE IMT CONFLICT TRAMPOLINE",
- GetPortableImtConflictTrampolineOffset);
- DUMP_OAT_HEADER_OFFSET("PORTABLE RESOLUTION TRAMPOLINE",
- GetPortableResolutionTrampolineOffset);
- DUMP_OAT_HEADER_OFFSET("PORTABLE TO INTERPRETER BRIDGE",
- GetPortableToInterpreterBridgeOffset);
DUMP_OAT_HEADER_OFFSET("QUICK GENERIC JNI TRAMPOLINE",
GetQuickGenericJniTrampolineOffset);
DUMP_OAT_HEADER_OFFSET("QUICK IMT CONFLICT TRAMPOLINE",
@@ -851,11 +845,6 @@ class OatDumper {
} else {
const void* code = oat_method.GetQuickCode();
uint32_t code_size = oat_method.GetQuickCodeSize();
- if (code == nullptr) {
- code = oat_method.GetPortableCode();
- code_size = oat_method.GetPortableCodeSize();
- code_size_offset = 0;
- }
uint32_t code_offset = oat_method.GetCodeOffset();
uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
uint64_t aligned_code_end = aligned_code_begin + code_size;
@@ -1054,23 +1043,12 @@ class OatDumper {
return; // No GC map.
}
const void* quick_code = oat_method.GetQuickCode();
- if (quick_code != nullptr) {
- NativePcOffsetToReferenceMap map(gc_map_raw);
- for (size_t entry = 0; entry < map.NumEntries(); entry++) {
- const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(quick_code) +
- map.GetNativePcOffset(entry);
- os << StringPrintf("%p", native_pc);
- DumpGcMapRegisters(os, oat_method, code_item, map.RegWidth() * 8, map.GetBitMap(entry));
- }
- } else {
- const void* portable_code = oat_method.GetPortableCode();
- CHECK(portable_code != nullptr);
- verifier::DexPcToReferenceMap map(gc_map_raw);
- for (size_t entry = 0; entry < map.NumEntries(); entry++) {
- uint32_t dex_pc = map.GetDexPc(entry);
- os << StringPrintf("0x%08x", dex_pc);
- DumpGcMapRegisters(os, oat_method, code_item, map.RegWidth() * 8, map.GetBitMap(entry));
- }
+ NativePcOffsetToReferenceMap map(gc_map_raw);
+ for (size_t entry = 0; entry < map.NumEntries(); entry++) {
+ const uint8_t* native_pc = reinterpret_cast<const uint8_t*>(quick_code) +
+ map.GetNativePcOffset(entry);
+ os << StringPrintf("%p", native_pc);
+ DumpGcMapRegisters(os, oat_method, code_item, map.RegWidth() * 8, map.GetBitMap(entry));
}
}
@@ -1228,16 +1206,15 @@ class OatDumper {
void DumpCode(std::ostream& os, verifier::MethodVerifier* verifier,
const OatFile::OatMethod& oat_method, const DexFile::CodeItem* code_item,
bool bad_input, size_t code_size) {
- const void* portable_code = oat_method.GetPortableCode();
const void* quick_code = oat_method.GetQuickCode();
if (code_size == 0) {
code_size = oat_method.GetQuickCodeSize();
}
- if ((code_size == 0) || ((portable_code == nullptr) && (quick_code == nullptr))) {
+ if (code_size == 0 || quick_code == nullptr) {
os << "NO CODE!\n";
return;
- } else if (quick_code != nullptr) {
+ } else {
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
while (offset < code_size) {
@@ -1255,9 +1232,6 @@ class OatDumper {
}
}
}
- } else {
- CHECK(portable_code != nullptr);
- CHECK_EQ(code_size, 0U); // TODO: disassembly of portable is currently not supported.
}
}
@@ -1636,7 +1610,6 @@ class ImageDumper {
state->oat_dumper_->GetOatInstructionSet());
mirror::ArtMethod* method = obj->AsArtMethod();
if (method->IsNative()) {
- // TODO: portable dumping.
DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
bool first_occurrence;
@@ -1679,7 +1652,6 @@ class ImageDumper {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
- // TODO: portable dumping.
const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 68fd15bc20..b6ec223680 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -540,12 +540,6 @@ void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
const size_t pointer_size = InstructionSetPointerSize(isa_);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
- uintptr_t portable = reinterpret_cast<uintptr_t>(
- object->GetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(pointer_size));
- if (portable != 0) {
- copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast<void*>(portable + delta_),
- pointer_size);
- }
uintptr_t quick= reinterpret_cast<uintptr_t>(
object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size));
if (quick != 0) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index b362b73ef2..ca29eba4ee 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -176,17 +176,6 @@ LIBART_COMMON_SRC_FILES += \
entrypoints/interpreter/interpreter_entrypoints.cc \
entrypoints/jni/jni_entrypoints.cc \
entrypoints/math_entrypoints.cc \
- entrypoints/portable/portable_alloc_entrypoints.cc \
- entrypoints/portable/portable_cast_entrypoints.cc \
- entrypoints/portable/portable_dexcache_entrypoints.cc \
- entrypoints/portable/portable_field_entrypoints.cc \
- entrypoints/portable/portable_fillarray_entrypoints.cc \
- entrypoints/portable/portable_invoke_entrypoints.cc \
- entrypoints/portable/portable_jni_entrypoints.cc \
- entrypoints/portable/portable_lock_entrypoints.cc \
- entrypoints/portable/portable_thread_entrypoints.cc \
- entrypoints/portable/portable_throw_entrypoints.cc \
- entrypoints/portable/portable_trampoline_entrypoints.cc \
entrypoints/quick/quick_alloc_entrypoints.cc \
entrypoints/quick/quick_cast_entrypoints.cc \
entrypoints/quick/quick_deoptimization_entrypoints.cc \
@@ -221,7 +210,6 @@ LIBART_TARGET_SRC_FILES_arm := \
arch/arm/instruction_set_features_assembly_tests.S \
arch/arm/jni_entrypoints_arm.S \
arch/arm/memcmp16_arm.S \
- arch/arm/portable_entrypoints_arm.S \
arch/arm/quick_entrypoints_arm.S \
arch/arm/quick_entrypoints_cc_arm.cc \
arch/arm/thread_arm.cc \
@@ -232,7 +220,6 @@ LIBART_TARGET_SRC_FILES_arm64 := \
arch/arm64/entrypoints_init_arm64.cc \
arch/arm64/jni_entrypoints_arm64.S \
arch/arm64/memcmp16_arm64.S \
- arch/arm64/portable_entrypoints_arm64.S \
arch/arm64/quick_entrypoints_arm64.S \
arch/arm64/thread_arm64.cc \
monitor_pool.cc \
@@ -243,7 +230,6 @@ LIBART_SRC_FILES_x86 := \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
arch/x86/memcmp16_x86.S \
- arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
arch/x86/thread_x86.cc \
arch/x86/fault_handler_x86.cc
@@ -258,7 +244,6 @@ LIBART_SRC_FILES_x86_64 := \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
arch/x86_64/memcmp16_x86_64.S \
- arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
monitor_pool.cc \
@@ -272,7 +257,6 @@ LIBART_TARGET_SRC_FILES_mips := \
arch/mips/entrypoints_init_mips.cc \
arch/mips/jni_entrypoints_mips.S \
arch/mips/memcmp16_mips.S \
- arch/mips/portable_entrypoints_mips.S \
arch/mips/quick_entrypoints_mips.S \
arch/mips/thread_mips.cc \
arch/mips/fault_handler_mips.cc
@@ -325,9 +309,6 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
verifier/method_verifier.h
LIBART_CFLAGS := -DBUILDING_LIBART=1
-ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- LIBART_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
-endif
ifeq ($(MALLOC_IMPL),dlmalloc)
LIBART_CFLAGS += -DUSE_DLMALLOC
@@ -483,14 +464,6 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
# For ashmem_create_region.
LOCAL_STATIC_LIBRARIES += libcutils
endif
- ifeq ($$(ART_USE_PORTABLE_COMPILER),true)
- include $$(LLVM_GEN_INTRINSICS_MK)
- ifeq ($$(art_target_or_host),target)
- include $$(LLVM_DEVICE_BUILD_MK)
- else # host
- include $$(LLVM_HOST_BUILD_MK)
- endif
- endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $$(LOCAL_PATH)/Android.mk
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 85a0dd2c6a..ce0e614854 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -16,7 +16,6 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -49,7 +48,7 @@ extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8
extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ QuickEntryPoints* qpoints) {
// Interpreter
ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -57,10 +56,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
- // Portable
- ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
- ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
-
// Alloc
ResetQuickAllocEntryPoints(qpoints);
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
deleted file mode 100644
index f59b514866..0000000000
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_arm.S"
-
- /*
- * Portable invocation stub.
- * On entry:
- * r0 = method pointer
- * r1 = argument array or NULL for no argument methods
- * r2 = size of argument array in bytes
- * r3 = (managed) thread pointer
- * [sp] = JValue* result
- * [sp + 4] = result type char
- */
-ENTRY art_portable_invoke_stub
- push {r0, r4, r5, r9, r11, lr} @ spill regs
- .cfi_adjust_cfa_offset 24
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r9, 12
- .cfi_rel_offset r11, 16
- .cfi_rel_offset lr, 20
- mov r11, sp @ save the stack pointer
- .cfi_def_cfa_register r11
- mov r9, r3 @ move managed thread pointer into r9
- mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
- add r5, r2, #16 @ create space for method pointer in frame
- and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
- sub sp, r5 @ reserve stack space for argument array
- add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
- bl memcpy @ memcpy (dest, src, bytes)
- ldr r0, [r11] @ restore method*
- ldr r1, [sp, #4] @ copy arg value for r1
- ldr r2, [sp, #8] @ copy arg value for r2
- ldr r3, [sp, #12] @ copy arg value for r3
- mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
- add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32] @ get pointer to the code
- blx ip @ call the method
- mov sp, r11 @ restore the stack pointer
- .cfi_def_cfa_register sp
- ldr ip, [sp, #24] @ load the result pointer
- strd r0, [ip] @ store r0/r1 into result pointer
- pop {r0, r4, r5, r9, r11, pc} @ restore spill regs
-END art_portable_invoke_stub
-
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
- @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
- push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r1, 0
- .cfi_rel_offset r2, 4
- .cfi_rel_offset r3, 8
- .cfi_rel_offset r5, 12
- .cfi_rel_offset r6, 16
- .cfi_rel_offset r7, 20
- .cfi_rel_offset r8, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset r11, 32
- .cfi_rel_offset lr, 36
- sub sp, #8 @ 2 words of space, bottom word will hold Method*
- .cfi_adjust_cfa_offset 8
- @ Begin argument set up.
- str r0, [sp, #0] @ place proxy method at bottom of frame
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
- ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
- bx lr @ return
-END art_portable_proxy_invoke_handler
-
- .extern artPortableResolutionTrampoline
-ENTRY art_portable_resolution_trampoline
- @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
- @ TODO: just save the registers that are needed in artPortableResolutionTrampoline.
- push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r1, 0
- .cfi_rel_offset r2, 4
- .cfi_rel_offset r3, 8
- .cfi_rel_offset r5, 12
- .cfi_rel_offset r6, 16
- .cfi_rel_offset r7, 20
- .cfi_rel_offset r8, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset r11, 32
- .cfi_rel_offset lr, 36
- sub sp, #8 @ 2 words of space, bottom word will hold Method*
- .cfi_adjust_cfa_offset 8
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- blx artPortableResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
- cmp r0, #0 @ is code pointer null?
- beq 1f @ goto exception
- mov r12, r0
- ldr r0, [sp, #0] @ load resolved method in r0
- ldr r1, [sp, #8] @ restore non-callee save r1
- ldrd r2, [sp, #12] @ restore non-callee saves r2-r3
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ rewind sp
- .cfi_adjust_cfa_offset -48
- bx r12 @ tail-call into actual code
- .cfi_adjust_cfa_offset 48 @ Reset unwind info so following code unwinds.
-
-1:
- ldr r1, [sp, #8] @ restore non-callee save r1
- ldrd r2, [sp, #12] @ restore non-callee saves r2-r3
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ rewind sp
- .cfi_adjust_cfa_offset -48
- bx lr
-END art_portable_resolution_trampoline
-
- .extern artPortableToInterpreterBridge
-ENTRY art_portable_to_interpreter_bridge
- @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
- @ TODO: just save the registers that are needed in artPortableToInterpreterBridge.
- push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
- .cfi_adjust_cfa_offset 40
- .cfi_rel_offset r1, 0
- .cfi_rel_offset r2, 4
- .cfi_rel_offset r3, 8
- .cfi_rel_offset r5, 12
- .cfi_rel_offset r6, 16
- .cfi_rel_offset r7, 20
- .cfi_rel_offset r8, 24
- .cfi_rel_offset r10, 28
- .cfi_rel_offset r11, 32
- .cfi_rel_offset lr, 36
- sub sp, #8 @ 2 words of space, bottom word will hold Method*
- .cfi_adjust_cfa_offset 8
- mov r1, r9 @ pass Thread::Current
- mov r2, sp @ pass SP
- blx artPortableToInterpreterBridge @ (Method* method, Thread*, SP)
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
- bx lr @ return
-END art_portable_to_interpreter_bridge
-
-UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 0ae54dc010..66ea3ce8e5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -379,12 +379,17 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
* +-------------------------+
*/
ENTRY art_quick_invoke_stub_internal
- push {r4, r9, r11, lr} @ spill regs
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr} @ spill regs
.cfi_adjust_cfa_offset 16
.cfi_rel_offset r4, 0
- .cfi_rel_offset r9, 4
- .cfi_rel_offset r11, 8
- .cfi_rel_offset lr, 12
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset r7, 12
+ .cfi_rel_offset r8, 16
+ .cfi_rel_offset r9, 20
+ .cfi_rel_offset r10, 24
+ .cfi_rel_offset r11, 28
+ .cfi_rel_offset lr, 32
mov r11, sp @ save the stack pointer
.cfi_def_cfa_register r11
@@ -401,10 +406,10 @@ ENTRY art_quick_invoke_stub_internal
mov ip, #0 @ set ip to 0
str ip, [sp] @ store NULL for method* at bottom of frame
- ldr ip, [r11, #28] @ load fp register argument array pointer
+ ldr ip, [r11, #48] @ load fp register argument array pointer
vldm ip, {s0-s15} @ copy s0 - s15
- ldr ip, [r11, #24] @ load core register argument array pointer
+ ldr ip, [r11, #44] @ load core register argument array pointer
mov r0, r4 @ restore method*
add ip, ip, #4 @ skip r0
ldm ip, {r1-r3} @ copy r1 - r3
@@ -419,14 +424,14 @@ ENTRY art_quick_invoke_stub_internal
mov sp, r11 @ restore the stack pointer
.cfi_def_cfa_register sp
- ldr r4, [sp, #20] @ load result_is_float
- ldr r9, [sp, #16] @ load the result pointer
+ ldr r4, [sp, #40] @ load result_is_float
+ ldr r9, [sp, #36] @ load the result pointer
cmp r4, #0
ite eq
strdeq r0, [r9] @ store r0/r1 into result pointer
vstrne d0, [r9] @ store s0-s1/d0 into result pointer
- pop {r4, r9, r11, pc} @ restore spill regs
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, pc} @ restore spill regs
END art_quick_invoke_stub_internal
/*
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2d26c033e1..e68d41df5a 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -16,7 +16,6 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -39,7 +38,7 @@ extern "C" double art_quick_fmod(double a, double b); // REM_DOUBLE[_2ADD
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ QuickEntryPoints* qpoints) {
// Interpreter
ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -47,10 +46,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
- // Portable
- ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
- ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
-
// Alloc
ResetQuickAllocEntryPoints(qpoints);
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 44159354ab..6047bb063f 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -499,7 +499,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=6*8 // x4, x5, xSUSPEND, SP, LR & FP saved.
+SAVE_SIZE=15*8 // x4, x5, x20, x21, x22, x23, x24, x25, x26, x27, x28, xSUSPEND, SP, LR, FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
@@ -515,6 +515,25 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
+ str x28, [x10, #112]
+ .cfi_rel_offset x28, 112
+
+ stp x26, x27, [x10, #96]
+ .cfi_rel_offset x26, 96
+ .cfi_rel_offset x27, 104
+
+ stp x24, x25, [x10, #80]
+ .cfi_rel_offset x24, 80
+ .cfi_rel_offset x25, 88
+
+ stp x22, x23, [x10, #64]
+ .cfi_rel_offset x22, 64
+ .cfi_rel_offset x23, 72
+
+ stp x20, x21, [x10, #48]
+ .cfi_rel_offset x20, 48
+ .cfi_rel_offset x21, 56
+
stp x9, xSUSPEND, [x10, #32] // Save old stack pointer and xSUSPEND
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
@@ -573,6 +592,25 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.cfi_restore x4
.cfi_restore x5
+ ldr x28, [xFP, #112]
+ .cfi_restore x28
+
+ ldp x26, x27, [xFP, #96]
+ .cfi_restore x26
+ .cfi_restore x27
+
+ ldp x24, x25, [xFP, #80]
+ .cfi_restore x24
+ .cfi_restore x25
+
+ ldp x22, x23, [xFP, #64]
+ .cfi_restore x22
+ .cfi_restore x23
+
+ ldp x20, x21, [xFP, #48]
+ .cfi_restore x20
+ .cfi_restore x21
+
// Store result (w0/x0/s0/d0) appropriately, depending on resultType.
ldrb w10, [x5]
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 0d18f1a9ba..eea6537f01 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -66,5 +66,54 @@
END \name
.endm
+#if defined(__mips_isa_rev) && __mips_isa_rev > 2
+ /* mips32r5 & mips32r6 have mthc1 op, and have 64-bit fp regs,
+ and in FPXX abi we avoid referring to odd-numbered fp regs */
+
+/* LDu: Load 64-bit floating-point value to float reg feven,
+ from unaligned (mod-4-aligned) mem location disp(base) */
+.macro LDu feven,fodd,disp,base,temp
+ l.s \feven, \disp(\base)
+ lw \temp, \disp+4(\base)
+ mthc1 \temp, \feven
+.endm
+
+/* SDu: Store 64-bit floating-point value from float reg feven,
+ to unaligned (mod-4-aligned) mem location disp(base) */
+.macro SDu feven,fodd,disp,base,temp
+ mfhc1 \temp, \feven
+ s.s \feven, \disp(\base)
+ sw \temp, \disp+4(\base)
+.endm
+
+/* MTD: Move double, from general regpair (reven,rodd)
+ to float regpair (feven,fodd) */
+.macro MTD reven,rodd,feven,fodd
+ mtc1 \reven, \feven
+ mthc1 \rodd, \feven
+.endm
+
+#else
+ /* mips32r1 has no mthc1 op;
+ mips32r1 and mips32r2 use 32-bit floating point register mode (FR=0),
+ and always hold doubles as (feven, fodd) fp reg pair */
+
+.macro LDu feven,fodd,disp,base,temp
+ l.s \feven, \disp(\base)
+ l.s \fodd, \disp+4(\base)
+.endm
+
+.macro SDu feven,fodd,disp,base,temp
+ s.s \feven, \disp(\base)
+ s.s \fodd, \disp+4(\base)
+.endm
+
+.macro MTD reven,rodd,feven,fodd
+ mtc1 \reven, \feven
+ mtc1 \rodd, \fodd
+.endm
+
+#endif /* mips_isa_rev */
+
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index e86aa1c16d..1a661c479f 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -17,7 +17,6 @@
#include "atomic.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -60,7 +59,7 @@ extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ QuickEntryPoints* qpoints) {
// Interpreter
ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -68,10 +67,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
- // Portable
- ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
- ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
-
// Alloc
ResetQuickAllocEntryPoints(qpoints);
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
deleted file mode 100644
index 8d418e8dd1..0000000000
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips.S"
-
- .set noreorder
- .balign 4
-
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- # Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
- # TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
- addiu $sp, $sp, -64
- .cfi_adjust_cfa_offset 64
- sw $ra, 60($sp)
- .cfi_rel_offset 31, 60
- sw $s8, 56($sp)
- .cfi_rel_offset 30, 56
- sw $gp, 52($sp)
- .cfi_rel_offset 28, 52
- sw $s7, 48($sp)
- .cfi_rel_offset 23, 48
- sw $s6, 44($sp)
- .cfi_rel_offset 22, 44
- sw $s5, 40($sp)
- .cfi_rel_offset 21, 40
- sw $s4, 36($sp)
- .cfi_rel_offset 20, 36
- sw $s3, 32($sp)
- .cfi_rel_offset 19, 32
- sw $s2, 28($sp)
- .cfi_rel_offset 18, 28
- sw $a3, 12($sp)
- .cfi_rel_offset 7, 12
- sw $a2, 8($sp)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- # Begin argument set up.
- sw $a0, 0($sp) # place proxy method at bottom of frame
- move $a2, rSELF # pass Thread::Current
- jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
- lw $ra, 60($sp) # restore $ra
- jr $ra
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
-END art_portable_proxy_invoke_handler
-
- /*
- * Invocation stub for portable code.
- * On entry:
- * a0 = method pointer
- * a1 = argument array or NULL for no argument methods
- * a2 = size of argument array in bytes
- * a3 = (managed) thread pointer
- * [sp + 16] = JValue* result
- * [sp + 20] = result type char
- */
-ENTRY art_portable_invoke_stub
- sw $a0, 0($sp) # save out a0
- addiu $sp, $sp, -16 # spill s0, s1, fp, ra
- .cfi_adjust_cfa_offset 16
- sw $ra, 12($sp)
- .cfi_rel_offset 31, 12
- sw $fp, 8($sp)
- .cfi_rel_offset 30, 8
- sw $s1, 4($sp)
- .cfi_rel_offset 17, 4
- sw $s0, 0($sp)
- .cfi_rel_offset 16, 0
- move $fp, $sp # save sp in fp
- .cfi_def_cfa_register 30
- move $s1, $a3 # move managed thread pointer into s1
- addiu $s0, $zero, SUSPEND_CHECK_INTERVAL # reset s0 to suspend check interval. TODO: unused?
- addiu $t0, $a2, 16 # create space for method pointer in frame
- srl $t0, $t0, 3 # shift the frame size right 3
- sll $t0, $t0, 3 # shift the frame size left 3 to align to 16 bytes
- subu $sp, $sp, $t0 # reserve stack space for argument array
- addiu $a0, $sp, 4 # pass stack pointer + method ptr as dest for memcpy
- jal memcpy # (dest, src, bytes)
- addiu $sp, $sp, -16 # make space for argument slots for memcpy
- addiu $sp, $sp, 16 # restore stack after memcpy
- lw $a0, 16($fp) # restore method*
- lw $a1, 4($sp) # copy arg value for a1
- lw $a2, 8($sp) # copy arg value for a2
- lw $a3, 12($sp) # copy arg value for a3
- lw $t9, MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32($a0) # get pointer to the code
- jalr $t9 # call the method
- sw $zero, 0($sp) # store NULL for method* at bottom of frame
- move $sp, $fp # restore the stack
- lw $s0, 0($sp)
- .cfi_restore 16
- lw $s1, 4($sp)
- .cfi_restore 17
- lw $fp, 8($sp)
- .cfi_restore 30
- lw $ra, 12($sp)
- .cfi_restore 31
- addiu $sp, $sp, 16
- .cfi_adjust_cfa_offset -16
- lw $t0, 16($sp) # get result pointer
- lw $t1, 20($sp) # get result type char
- li $t2, 68 # put char 'D' into t2
- beq $t1, $t2, 1f # branch if result type char == 'D'
- li $t3, 70 # put char 'F' into t3
- beq $t1, $t3, 1f # branch if result type char == 'F'
- sw $v0, 0($t0) # store the result
- jr $ra
- sw $v1, 4($t0) # store the other half of the result
-1:
- s.s $f0, 0($t0) # store floating point result
- jr $ra
- s.s $f1, 4($t0) # store other half of floating point result
-END art_portable_invoke_stub
-
-UNIMPLEMENTED art_portable_resolution_trampoline
-UNIMPLEMENTED art_portable_to_interpreter_bridge
-UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 44feee6a65..fb792389e7 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -314,38 +314,23 @@
* FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
*/
ENTRY art_quick_do_long_jump
- l.s $f0, 0($a1)
- l.s $f1, 4($a1)
- l.s $f2, 8($a1)
- l.s $f3, 12($a1)
- l.s $f4, 16($a1)
- l.s $f5, 20($a1)
- l.s $f6, 24($a1)
- l.s $f7, 28($a1)
- l.s $f8, 32($a1)
- l.s $f9, 36($a1)
- l.s $f10, 40($a1)
- l.s $f11, 44($a1)
- l.s $f12, 48($a1)
- l.s $f13, 52($a1)
- l.s $f14, 56($a1)
- l.s $f15, 60($a1)
- l.s $f16, 64($a1)
- l.s $f17, 68($a1)
- l.s $f18, 72($a1)
- l.s $f19, 76($a1)
- l.s $f20, 80($a1)
- l.s $f21, 84($a1)
- l.s $f22, 88($a1)
- l.s $f23, 92($a1)
- l.s $f24, 96($a1)
- l.s $f25, 100($a1)
- l.s $f26, 104($a1)
- l.s $f27, 108($a1)
- l.s $f28, 112($a1)
- l.s $f29, 116($a1)
- l.s $f30, 120($a1)
- l.s $f31, 124($a1)
+ LDu $f0, $f1, 0*8, $a1, $t1
+ LDu $f2, $f3, 1*8, $a1, $t1
+ LDu $f4, $f5, 2*8, $a1, $t1
+ LDu $f6, $f7, 3*8, $a1, $t1
+ LDu $f8, $f9, 4*8, $a1, $t1
+ LDu $f10, $f11, 5*8, $a1, $t1
+ LDu $f12, $f13, 6*8, $a1, $t1
+ LDu $f14, $f15, 7*8, $a1, $t1
+ LDu $f16, $f17, 8*8, $a1, $t1
+ LDu $f18, $f19, 9*8, $a1, $t1
+ LDu $f20, $f21, 10*8, $a1, $t1
+ LDu $f22, $f23, 11*8, $a1, $t1
+ LDu $f24, $f25, 12*8, $a1, $t1
+ LDu $f26, $f27, 13*8, $a1, $t1
+ LDu $f28, $f29, 14*8, $a1, $t1
+ LDu $f30, $f31, 15*8, $a1, $t1
+
.set push
.set nomacro
.set noat
@@ -558,9 +543,9 @@ ENTRY art_quick_invoke_stub
jr $ra
sw $v1, 4($t0) # store the other half of the result
1:
- s.s $f0, 0($t0) # store floating point result
+ SDu $f0, $f1, 0, $t0, $t1 # store floating point result
jr $ra
- s.s $f1, 4($t0) # store other half of floating point result
+ nop
END art_quick_invoke_stub
/*
@@ -699,7 +684,7 @@ ENTRY art_quick_aput_obj
lw $a2, 8($sp)
lw $a1, 4($sp)
lw $a0, 0($sp)
- add $sp, 32
+ addiu $sp, 32
.cfi_adjust_cfa_offset -32
bnez $v0, .Ldo_aput
nop
@@ -1080,7 +1065,7 @@ GENERATE_ALL_ALLOC_ENTRYPOINTS
ENTRY art_quick_test_suspend
lh $a0, THREAD_FLAGS_OFFSET(rSELF)
bnez $a0, 1f
- addi rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ addiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
jr $ra
nop
1:
@@ -1103,9 +1088,10 @@ ENTRY art_quick_proxy_invoke_handler
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bnez $t0, 1f
- mtc1 $v0, $f0 # place return value to FP return value
+ # don't care if $v0 and/or $v1 are modified, when exception branch taken
+ MTD $v0, $v1, $f0, $f1 # move float value to return value
jr $ra
- mtc1 $v1, $f1 # place return value to FP return value
+ nop
1:
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
@@ -1191,9 +1177,9 @@ ENTRY art_quick_generic_jni_trampoline
# tear dpown the callee-save frame
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
- mtc1 $v0, $f0 # place return value to FP return value
+ MTD $v0, $v1, $f0, $f1 # move float value to return value
jr $ra
- mtc1 $v1, $f1 # place return value to FP return value
+ nop
1:
move $sp, $s8 # tear down the alloca
@@ -1211,9 +1197,10 @@ ENTRY art_quick_to_interpreter_bridge
lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
bnez $t0, 1f
- mtc1 $v0, $f0 # place return value to FP return value
+ # don't care if $v0 and/or $v1 are modified, when exception branch taken
+ MTD $v0, $v1, $f0, $f1 # move float value to return value
jr $ra
- mtc1 $v1, $f1 # place return value to FP return value
+ nop
1:
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
@@ -1248,12 +1235,10 @@ art_quick_instrumentation_exit:
sw $v0, 12($sp)
.cfi_rel_offset 2, 32
sw $v1, 8($sp)
- .cfi_rel_offset 3, 36
- s.s $f0, 4($sp)
- s.s $f1, 0($sp)
+ .cfi_rel_offset 3, 36
+ s.d $f0, 0($sp)
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
- s.s $f0, 16($sp) # pass fpr result
- s.s $f1, 20($sp)
+ s.d $f0, 16($sp) # pass fpr result
move $a2, $v0 # pass gpr result
move $a3, $v1
addiu $a1, $sp, ARG_SLOT_SIZE # pass $sp (remove arg slots)
@@ -1264,8 +1249,7 @@ art_quick_instrumentation_exit:
addiu $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE # args slot + refs_only callee save frame
lw $v0, 12($sp) # restore return values
lw $v1, 8($sp)
- l.s $f0, 4($sp)
- l.s $f1, 0($sp)
+ l.d $f0, 0($sp)
jr $t0 # return
addiu $sp, $sp, 16 # remove temp storage from stack
.cfi_adjust_cfa_offset -16
@@ -1300,11 +1284,15 @@ ENTRY_NO_GP art_quick_shl_long
srl $a0, 1
srl $a0, $v1 # alo<- alo >> (32-(shift&31))
sll $v1, $a1, $a2 # rhi<- ahi << (shift&31)
- or $v1, $a0 # rhi<- rhi | alo
andi $a2, 0x20 # shift< shift & 0x20
- movn $v1, $v0, $a2 # rhi<- rlo (if shift&0x20)
- jr $ra
- movn $v0, $zero, $a2 # rlo<- 0 (if shift&0x20)
+ beqz $a2, 1f
+ or $v1, $a0 # rhi<- rhi | alo
+
+ move $v1, $v0 # rhi<- rlo (if shift&0x20)
+ move $v0, $zero # rlo<- 0 (if shift&0x20)
+
+1: jr $ra
+ nop
END art_quick_shl_long
/*
@@ -1324,11 +1312,15 @@ ENTRY_NO_GP art_quick_shr_long
not $a0, $a2 # alo<- 31-shift (shift is 5b)
sll $a1, 1
sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
- or $v0, $a1 # rlo<- rlo | ahi
andi $a2, 0x20 # shift & 0x20
- movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20)
- jr $ra
- movn $v1, $a3, $a2 # rhi<- sign(ahi) (if shift&0x20)
+ beqz $a2, 1f
+ or $v0, $a1 # rlo<- rlo | ahi
+
+ move $v0, $v1 # rlo<- rhi (if shift&0x20)
+ move $v1, $a3 # rhi<- sign(ahi) (if shift&0x20)
+
+1: jr $ra
+ nop
END art_quick_shr_long
/*
@@ -1348,11 +1340,15 @@ ENTRY_NO_GP art_quick_ushr_long
not $a0, $a2 # alo<- 31-shift (shift is 5b)
sll $a1, 1
sll $a1, $a0 # ahi<- ahi << (32-(shift&31))
- or $v0, $a1 # rlo<- rlo | ahi
andi $a2, 0x20 # shift & 0x20
- movn $v0, $v1, $a2 # rlo<- rhi (if shift&0x20)
- jr $ra
- movn $v1, $zero, $a2 # rhi<- 0 (if shift&0x20)
+ beqz $a2, 1f
+ or $v0, $a1 # rlo<- rlo | ahi
+
+ move $v0, $v1 # rlo<- rhi (if shift&0x20)
+ move $v1, $zero # rhi<- 0 (if shift&0x20)
+
+1: jr $ra
+ nop
END art_quick_ushr_long
UNIMPLEMENTED art_quick_indexof
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index a1215420a4..2ac5279a52 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -16,7 +16,6 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -34,7 +33,7 @@ extern "C" double art_quick_fmod(double, double);
extern "C" float art_quick_fmodf(float, float);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ QuickEntryPoints* qpoints) {
// Interpreter
ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
@@ -42,10 +41,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
- // Portable
- ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
- ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
-
// Alloc
ResetQuickAllocEntryPoints(qpoints);
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
deleted file mode 100644
index 1f0900e86d..0000000000
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_x86.S"
-
- /*
- * Portable invocation stub.
- * On entry:
- * [sp] = return address
- * [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
- * [sp + 12] = size of argument array in bytes
- * [sp + 16] = (managed) thread pointer
- * [sp + 20] = JValue* result
- * [sp + 24] = result type char
- */
-DEFINE_FUNCTION art_portable_invoke_stub
- PUSH ebp // save ebp
- PUSH ebx // save ebx
- mov %esp, %ebp // copy value of stack pointer into base pointer
- CFI_DEF_CFA_REGISTER(ebp)
- mov 20(%ebp), %ebx // get arg array size
- addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
- andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
- subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
- subl %ebx, %esp // reserve stack space for argument array
- SETUP_GOT_NOSAVE ebx // reset ebx to GOT table
- lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
- pushl 20(%ebp) // push size of region to memcpy
- pushl 16(%ebp) // push arg array as source of memcpy
- pushl %eax // push stack pointer as destination of memcpy
- call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
- addl LITERAL(12), %esp // pop arguments to memcpy
- mov 12(%ebp), %eax // move method pointer into eax
- mov %eax, (%esp) // push method pointer onto stack
- call *MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method
- mov %ebp, %esp // restore stack pointer
- POP ebx // pop ebx
- POP ebp // pop ebp
- mov 20(%esp), %ecx // get result pointer
- cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
- je .Lreturn_double_portable
- cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
- je .Lreturn_float_portable
- mov %eax, (%ecx) // store the result
- mov %edx, 4(%ecx) // store the other half of the result
- ret
-.Lreturn_double_portable:
- fstpl (%ecx) // store the floating point result as double
- ret
-.Lreturn_float_portable:
- fstps (%ecx) // store the floating point result as float
- ret
-END_FUNCTION art_portable_invoke_stub
-
-DEFINE_FUNCTION art_portable_proxy_invoke_handler
- PUSH ebp // Set up frame.
- movl %esp, %ebp
- CFI_DEF_CFA_REGISTER(%ebp)
- subl LITERAL(8), %esp // Align stack
- leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
- movl 12(%ebp), %ecx // %ecx = receiver
- movl 0(%edx), %eax // %eax = ArtMethod* called
- pushl %edx // Pass called_addr.
- pushl %fs:THREAD_SELF_OFFSET // Pass thread.
- pushl %ecx // Pass receiver.
- pushl %eax // Pass called.
- call SYMBOL(artPortableProxyInvokeHandler) // (called, receiver, Thread*, &called)
- leave
- CFI_RESTORE(%ebp)
- CFI_DEF_CFA(%esp, 4)
- movd %eax, %xmm0 // Place return value also into floating point return value.
- movd %edx, %xmm1
- punpckldq %xmm1, %xmm0
- ret
-END_FUNCTION art_portable_proxy_invoke_handler
-
-DEFINE_FUNCTION art_portable_resolution_trampoline
- PUSH ebp // Set up frame.
- movl %esp, %ebp
- CFI_DEF_CFA_REGISTER(%ebp)
- subl LITERAL(8), %esp // Align stack
- leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
- movl 12(%ebp), %ecx // %ecx = receiver
- movl 0(%edx), %eax // %eax = ArtMethod* called
- pushl %edx // Pass called_addr.
- pushl %fs:THREAD_SELF_OFFSET // Pass thread.
- pushl %ecx // Pass receiver.
- pushl %eax // Pass called.
- call SYMBOL(artPortableResolutionTrampoline) // (called, receiver, Thread*, &called)
- leave
- CFI_RESTORE(%ebp)
- CFI_DEF_CFA(%esp, 4)
- testl %eax, %eax
- jz .Lresolve_fail
- jmp * %eax
-.Lresolve_fail: // Resolution failed, return with exception pending.
- ret
-END_FUNCTION art_portable_resolution_trampoline
-
-DEFINE_FUNCTION art_portable_to_interpreter_bridge
- PUSH ebp // Set up frame.
- movl %esp, %ebp
- CFI_DEF_CFA_REGISTER(%ebp)
- subl LITERAL(12), %esp // Align stack
- leal 8(%ebp), %edx // %edx = ArtMethod** called_addr
- movl 0(%edx), %eax // %eax = ArtMethod* called
- pushl %edx // Pass called_addr.
- pushl %fs:THREAD_SELF_OFFSET // Pass thread.
- pushl %eax // Pass called.
- call SYMBOL(artPortableToInterpreterBridge) // (called, Thread*, &called)
- leave
- CFI_RESTORE(%ebp)
- CFI_DEF_CFA(%esp, 4)
- ret
-END_FUNCTION art_portable_to_interpreter_bridge
-
-UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 0bfa1ce688..302b9f85cb 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -297,28 +297,34 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
DEFINE_FUNCTION art_quick_invoke_stub
PUSH ebp // save ebp
PUSH ebx // save ebx
+ PUSH esi // save esi
+ PUSH edi // save edi
mov %esp, %ebp // copy value of stack pointer into base pointer
CFI_DEF_CFA_REGISTER(ebp)
- mov 20(%ebp), %ebx // get arg array size
- addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
- andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
- subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ mov 28(%ebp), %ebx // get arg array size
+ // reserve space for return addr, method*, ebx, ebp, esi, and edi in frame
+ addl LITERAL(36), %ebx
+ // align frame size to 16 bytes
+ andl LITERAL(0xFFFFFFF0), %ebx
+ subl LITERAL(20), %ebx // remove space for return address, ebx, ebp, esi and edi
subl %ebx, %esp // reserve stack space for argument array
SETUP_GOT_NOSAVE ebx // clobbers ebx (harmless here)
lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
- pushl 20(%ebp) // push size of region to memcpy
- pushl 16(%ebp) // push arg array as source of memcpy
+ pushl 28(%ebp) // push size of region to memcpy
+ pushl 24(%ebp) // push arg array as source of memcpy
pushl %eax // push stack pointer as destination of memcpy
call PLT_SYMBOL(memcpy) // (void*, const void*, size_t)
addl LITERAL(12), %esp // pop arguments to memcpy
movl LITERAL(0), (%esp) // store NULL for method*
- mov 12(%ebp), %eax // move method pointer into eax
+ mov 20(%ebp), %eax // move method pointer into eax
mov 4(%esp), %ecx // copy arg1 into ecx
mov 8(%esp), %edx // copy arg2 into edx
mov 12(%esp), %ebx // copy arg3 into ebx
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
mov %ebp, %esp // restore stack pointer
CFI_DEF_CFA_REGISTER(esp)
+ POP edi // pop edi
+ POP esi // pop esi
POP ebx // pop ebx
POP ebp // pop ebp
mov 20(%esp), %ecx // get result pointer
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 2cfcfed209..3f1e4b5948 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -16,7 +16,6 @@
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -31,9 +30,9 @@ extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
const mirror::Class* ref_class);
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+ QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
- UNUSED(ipoints, jpoints, ppoints, qpoints);
+ UNUSED(ipoints, jpoints, qpoints);
UNIMPLEMENTED(FATAL);
#else
// Interpreter
@@ -43,10 +42,6 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
- // Portable
- ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
- ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
-
// Alloc
ResetQuickAllocEntryPoints(qpoints);
diff --git a/runtime/arch/x86_64/portable_entrypoints_x86_64.S b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
deleted file mode 100644
index 3a54005aee..0000000000
--- a/runtime/arch/x86_64/portable_entrypoints_x86_64.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_x86_64.S"
-
- /*
- * Portable invocation stub.
- */
-UNIMPLEMENTED art_portable_invoke_stub
-
-UNIMPLEMENTED art_portable_proxy_invoke_handler
-
-UNIMPLEMENTED art_portable_resolution_trampoline
-
-UNIMPLEMENTED art_portable_to_interpreter_bridge
-
-UNIMPLEMENTED art_portable_imt_conflict_trampoline
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 7f85ab71b4..5ae65db0f7 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -487,15 +487,21 @@ DEFINE_FUNCTION art_quick_invoke_stub
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
+ PUSH rbx // Save native callee save rbx
+ PUSH r12 // Save native callee save r12
+ PUSH r13 // Save native callee save r13
+ PUSH r14 // Save native callee save r14
+ PUSH r15 // Save native callee save r15
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
- addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
- // r8 and r9 in frame.
- andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
- subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
- subq %rdx, %rsp // Reserve stack space for argument array.
+ addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8, r9, rbx, r12, r13, r14, and r15 in frame.
+ andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
+ subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
+ // r13, r14, and r15
+ subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
@@ -503,15 +509,15 @@ DEFINE_FUNCTION art_quick_invoke_stub
movl LITERAL(0), (%rsp) // Store NULL for method*
movl %r10d, %ecx // Place size of args in rcx.
- movq %rdi, %rax // RAX := method to be called
- movq %rsi, %r11 // R11 := arg_array
- leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
+ movq %rdi, %rax // rax := method to be called
+ movq %rsi, %r11 // r11 := arg_array
+ leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
// stack arguments.
// Copy arg array into stack.
rep movsb // while (rcx--) { *rdi++ = *rsi++ }
- leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
- movq %rax, %rdi // RDI := method to be called
- movl (%r11), %esi // RSI := this pointer
+ leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
+ movq %rax, %rdi // rdi := method to be called
+ movl (%r11), %esi // rsi := this pointer
addq LITERAL(4), %r11 // arg_array++
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished
@@ -520,8 +526,12 @@ DEFINE_FUNCTION art_quick_invoke_stub
.Lgpr_setup_finished:
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
movq %rbp, %rsp // Restore stack pointer.
- CFI_DEF_CFA_REGISTER(rsp)
- POP r9 // Pop r9 - shorty*.
+ POP r15 // Pop r15
+ POP r14 // Pop r14
+ POP r13 // Pop r13
+ POP r12 // Pop r12
+ POP rbx // Pop rbx
+ POP r9 // Pop r9 - shorty*
POP r8 // Pop r8 - result*.
POP rbp // Pop rbp
cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
@@ -531,10 +541,10 @@ DEFINE_FUNCTION art_quick_invoke_stub
movq %rax, (%r8) // Store the result assuming its a long, int or Object*
ret
.Lreturn_double_quick:
- movsd %xmm0, (%r8) // Store the double floating point result.
+ movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick:
- movss %xmm0, (%r8) // Store the floating point result.
+ movss %xmm0, (%r8) // Store the floating point result.
ret
#endif // __APPLE__
END_FUNCTION art_quick_invoke_stub
@@ -571,30 +581,36 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
PUSH rbp // Save rbp.
PUSH r8 // Save r8/result*.
PUSH r9 // Save r9/shorty*.
+ PUSH rbx // Save rbx
+ PUSH r12 // Save r12
+ PUSH r13 // Save r13
+ PUSH r14 // Save r14
+ PUSH r15 // Save r15
movq %rsp, %rbp // Copy value of stack pointer into base pointer.
CFI_DEF_CFA_REGISTER(rbp)
movl %edx, %r10d
- addl LITERAL(60), %edx // Reserve space for return addr, StackReference<method>, rbp,
- // r8 and r9 in frame.
- andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
- subl LITERAL(32), %edx // Remove space for return address, rbp, r8 and r9.
- subq %rdx, %rsp // Reserve stack space for argument array.
+ addl LITERAL(100), %edx // Reserve space for return addr, StackReference<method>, rbp,
+ // r8, r9, rbx, r12, r13, r14, and r15 in frame.
+ andl LITERAL(0xFFFFFFF0), %edx // Align frame size to 16 bytes.
+ subl LITERAL(72), %edx // Remove space for return address, rbp, r8, r9, rbx, r12,
+ // r13, r14, and r15.
+ subq %rdx, %rsp // Reserve stack space for argument array.
#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(X86_64) size not as expected."
#endif
- movl LITERAL(0), (%rsp) // Store NULL for method*
+ movl LITERAL(0), (%rsp) // Store NULL for method*
- movl %r10d, %ecx // Place size of args in rcx.
- movq %rdi, %rax // RAX := method to be called
- movq %rsi, %r11 // R11 := arg_array
- leaq 4(%rsp), %rdi // Rdi is pointing just above the StackReference<method> in the
- // stack arguments.
+ movl %r10d, %ecx // Place size of args in rcx.
+ movq %rdi, %rax // rax := method to be called
+ movq %rsi, %r11 // r11 := arg_array
+ leaq 4(%rsp), %rdi // rdi is pointing just above the StackReference<method> in the
+ // stack arguments.
// Copy arg array into stack.
- rep movsb // while (rcx--) { *rdi++ = *rsi++ }
- leaq 1(%r9), %r10 // R10 := shorty + 1 ; ie skip return arg character
- movq %rax, %rdi // RDI := method to be called
+ rep movsb // while (rcx--) { *rdi++ = *rsi++ }
+ leaq 1(%r9), %r10 // r10 := shorty + 1 ; ie skip return arg character
+ movq %rax, %rdi // rdi := method to be called
LOOP_OVER_SHORTY_LOADING_GPRS rsi, esi, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, .Lgpr_setup_finished2
LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, .Lgpr_setup_finished2
@@ -602,22 +618,26 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
.Lgpr_setup_finished2:
call *MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
- movq %rbp, %rsp // Restore stack pointer.
- CFI_DEF_CFA_REGISTER(rsp)
- POP r9 // Pop r9 - shorty*.
- POP r8 // Pop r8 - result*.
- POP rbp // Pop rbp
- cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
+ movq %rbp, %rsp // Restore stack pointer.
+ POP r15 // Pop r15
+ POP r14 // Pop r14
+ POP r13 // Pop r13
+ POP r12 // Pop r12
+ POP rbx // Pop rbx
+ POP r9 // Pop r9 - shorty*.
+ POP r8 // Pop r8 - result*.
+ POP rbp // Pop rbp
+ cmpb LITERAL(68), (%r9) // Test if result type char == 'D'.
je .Lreturn_double_quick2
- cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
+ cmpb LITERAL(70), (%r9) // Test if result type char == 'F'.
je .Lreturn_float_quick2
- movq %rax, (%r8) // Store the result assuming its a long, int or Object*
+ movq %rax, (%r8) // Store the result assuming its a long, int or Object*
ret
.Lreturn_double_quick2:
- movsd %xmm0, (%r8) // Store the double floating point result.
+ movsd %xmm0, (%r8) // Store the double floating point result.
ret
.Lreturn_float_quick2:
- movss %xmm0, (%r8) // Store the floating point result.
+ movss %xmm0, (%r8) // Store the floating point result.
ret
#endif // __APPLE__
END_FUNCTION art_quick_invoke_static_stub
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 7454cca65a..a35e05b87b 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -148,18 +148,10 @@ ADD_TEST_EQ(MIRROR_STRING_OFFSET_OFFSET, art::mirror::String::OffsetOffset().Int
ADD_TEST_EQ(MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET,
art::mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value())
-#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32 (40 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value())
-
#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32 (36 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value())
-#define MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64 (56 + MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
- art::mirror::ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value())
-
#define MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64 (48 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
art::mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value())
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index bdc4cf6399..b781d6008c 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -35,6 +35,8 @@ namespace art {
LogVerbosity gLogVerbosity;
+unsigned int gAborting = 0;
+
static LogSeverity gMinimumLogSeverity = INFO;
static std::unique_ptr<std::string> gCmdLine;
static std::unique_ptr<std::string> gProgramInvocationName;
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index a9cc99b085..ae83e331fd 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -55,6 +55,11 @@ struct LogVerbosity {
// Global log verbosity setting, initialized by InitLogging.
extern LogVerbosity gLogVerbosity;
+// 0 if not abort, non-zero if an abort is in progress. Used on fatal exit to prevents recursive
+// aborts. Global declaration allows us to disable some error checking to ensure fatal shutdown
+// makes forward progress.
+extern unsigned int gAborting;
+
// Configure logging based on ANDROID_LOG_TAGS environment variable.
// We need to parse a string that looks like
//
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 66d6fabbcd..f705469c89 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -158,6 +158,8 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
#endif
+#define NO_INLINE __attribute__ ((noinline))
+
#if defined (__APPLE__)
#define HOT_ATTR
#define COLD_ATTR
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index 020634122e..cb698175df 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -97,7 +97,9 @@ inline void BaseMutex::RegisterAsLocked(Thread* self) {
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) { // Avoid recursive aborts.
+ CHECK(!bad_mutexes_held);
+ }
}
// Don't record monitors as they are outside the scope of analysis. They may be inspected off of
// the monitor list.
@@ -112,7 +114,7 @@ inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
return;
}
if (level_ != kMonitorLock) {
- if (kDebugLocking) {
+ if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
}
self->SetHeldMutex(level_, NULL);
@@ -176,7 +178,7 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
if (kDebugLocking) {
// Sanity debug check that if we think it is locked we have it in our held mutexes.
- if (result && self != NULL && level_ != kMonitorLock) {
+ if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
CHECK_EQ(self->GetHeldMutex(level_), this);
}
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 49579886fd..a4eb318d4c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -209,7 +209,9 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) { // Avoid recursive aborts.
+ CHECK(!bad_mutexes_held);
+ }
}
}
@@ -325,8 +327,12 @@ Mutex::~Mutex() {
LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
} else {
CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
- CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
- << "unexpectedly found a contender on mutex " << name_;
+ if (level_ != kMonitorLock) {
+ // Only check the lock level for non monitor locks since we may still have java threads
+ // waiting on monitors.
+ CHECK_EQ(num_contenders_.LoadSequentiallyConsistent(), 0)
+ << "unexpectedly found a contender on mutex " << name_;
+ }
}
#else
// We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
@@ -428,7 +434,17 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
}
void Mutex::ExclusiveUnlock(Thread* self) {
- DCHECK(self == NULL || self == Thread::Current());
+ if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
+ std::string name1 = "<null>";
+ std::string name2 = "<null>";
+ if (self != nullptr) {
+ self->GetThreadName(name1);
+ }
+ if (Thread::Current() != nullptr) {
+ Thread::Current()->GetThreadName(name2);
+ }
+ LOG(FATAL) << name1 << " " << name2;
+ }
AssertHeld(self);
DCHECK_NE(exclusive_owner_, 0U);
recursion_count_--;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 41b5f12fdb..9c93cc624d 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -220,7 +220,7 @@ class LOCKABLE Mutex : public BaseMutex {
// Assert that the Mutex is exclusively held by the current thread.
void AssertExclusiveHeld(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
@@ -228,7 +228,7 @@ class LOCKABLE Mutex : public BaseMutex {
// Assert that the Mutex is not held by the current thread.
void AssertNotHeldExclusive(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
@@ -318,7 +318,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread has exclusive access to the ReaderWriterMutex.
void AssertExclusiveHeld(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
CHECK(IsExclusiveHeld(self)) << *this;
}
}
@@ -326,7 +326,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
void AssertNotExclusiveHeld(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsExclusiveHeld(self)) << *this;
}
}
@@ -337,7 +337,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread has shared access to the ReaderWriterMutex.
void AssertSharedHeld(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
// TODO: we can only assert this well when self != NULL.
CHECK(IsSharedHeld(self) || self == NULL) << *this;
}
@@ -347,7 +347,7 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
// Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
// mode.
void AssertNotHeld(const Thread* self) {
- if (kDebugLocking) {
+ if (kDebugLocking && (gAborting == 0)) {
CHECK(!IsSharedHeld(self)) << *this;
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 1f4cf8fcfc..d119a56da3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -21,6 +21,7 @@
#include <memory>
#include <queue>
#include <string>
+#include <unistd.h>
#include <utility>
#include <vector>
@@ -237,9 +238,7 @@ ClassLinker::ClassLinker(InternTable* intern_table)
log_new_dex_caches_roots_(false),
log_new_class_table_roots_(false),
intern_table_(intern_table),
- portable_resolution_trampoline_(nullptr),
quick_resolution_trampoline_(nullptr),
- portable_imt_conflict_trampoline_(nullptr),
quick_imt_conflict_trampoline_(nullptr),
quick_generic_jni_trampoline_(nullptr),
quick_to_interpreter_bridge_trampoline_(nullptr),
@@ -575,6 +574,23 @@ void ClassLinker::InitWithoutImage(const std::vector<const DexFile*>& boot_class
FindSystemClass(self, "[Ljava/lang/StackTraceElement;"));
mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement));
+ // Ensure void type is resolved in the core's dex cache so java.lang.Void is correctly
+ // initialized.
+ {
+ const DexFile& dex_file = java_lang_Object->GetDexFile();
+ const DexFile::StringId* void_string_id = dex_file.FindStringId("V");
+ CHECK(void_string_id != nullptr);
+ uint32_t void_string_index = dex_file.GetIndexForStringId(*void_string_id);
+ const DexFile::TypeId* void_type_id = dex_file.FindTypeId(void_string_index);
+ CHECK(void_type_id != nullptr);
+ uint16_t void_type_idx = dex_file.GetIndexForTypeId(*void_type_id);
+ // Now we resolve void type so the dex cache contains it. We use java.lang.Object class
+ // as referrer so the used dex cache is core's one.
+ mirror::Class* resolved_type = ResolveType(dex_file, void_type_idx, java_lang_Object.Get());
+ CHECK_EQ(resolved_type, GetClassRoot(kPrimitiveVoid));
+ self->AssertNoPendingException();
+ }
+
FinishInit(self);
VLOG(startup) << "ClassLinker::InitFromCompiler exiting";
@@ -704,7 +720,14 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename,
argv.push_back(compiler_options[i].c_str());
}
- return Exec(argv, error_msg);
+ if (!Exec(argv, error_msg)) {
+ // Manually delete the file. Ensures there is no garbage left over if the process unexpectedly
+ // died. Ignore unlink failure, propagate the original error.
+ TEMP_FAILURE_RETRY(unlink(oat_cache_filename));
+ return false;
+ }
+
+ return true;
}
const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
@@ -1614,8 +1637,6 @@ void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void*
if (method != Runtime::Current()->GetResolutionMethod()) {
method->SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(),
pointer_size);
- method->SetEntryPointFromPortableCompiledCodePtrSize(GetPortableToInterpreterBridge(),
- pointer_size);
}
}
}
@@ -1636,9 +1657,7 @@ void ClassLinker::InitFromImage() {
const char* image_file_location = oat_file.GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
CHECK(image_file_location == nullptr || *image_file_location == 0);
- portable_resolution_trampoline_ = oat_file.GetOatHeader().GetPortableResolutionTrampoline();
quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline();
- portable_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetPortableImtConflictTrampoline();
quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
quick_to_interpreter_bridge_trampoline_ = oat_file.GetOatHeader().GetQuickToInterpreterBridge();
@@ -2504,9 +2523,6 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
if (method->IsNative()) {
// No code and native? Use generic trampoline.
result = GetQuickGenericJniStub();
- } else if (method->IsPortableCompiled()) {
- // No code? Do we expect portable code?
- result = GetQuickToPortableBridge();
} else {
// No code? You must mean to go into the interpreter.
result = GetQuickToInterpreterBridge();
@@ -2515,36 +2531,6 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
return result;
}
-const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
- bool* have_portable_code) {
- CHECK(!method->IsAbstract()) << PrettyMethod(method);
- *have_portable_code = false;
- if (method->IsProxyMethod()) {
- return GetPortableProxyInvokeHandler();
- }
- bool found;
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- const void* result = nullptr;
- const void* quick_code = nullptr;
- if (found) {
- result = oat_method.GetPortableCode();
- quick_code = oat_method.GetQuickCode();
- }
-
- if (result == nullptr) {
- if (quick_code == nullptr) {
- // No code? You must mean to go into the interpreter.
- result = GetPortableToInterpreterBridge();
- } else {
- // No code? But there's quick code, so use a bridge.
- result = GetPortableToQuickBridge();
- }
- } else {
- *have_portable_code = true;
- }
- return result;
-}
-
const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
return nullptr;
@@ -2554,15 +2540,6 @@ const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
return found ? oat_method.GetQuickCode() : nullptr;
}
-const void* ClassLinker::GetOatMethodPortableCodeFor(mirror::ArtMethod* method) {
- if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
- return nullptr;
- }
- bool found;
- OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
- return found ? oat_method.GetPortableCode() : nullptr;
-}
-
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
bool found;
@@ -2574,34 +2551,15 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl
return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
}
-const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
- uint32_t method_idx) {
- bool found;
- OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
- if (!found) {
- return nullptr;
- }
- uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
- return oat_class.GetOatMethod(oat_method_idx).GetPortableCode();
-}
-
// Returns true if the method must run with interpreter, false otherwise.
-static bool NeedsInterpreter(
- mirror::ArtMethod* method, const void* quick_code, const void* portable_code)
+static bool NeedsInterpreter(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if ((quick_code == nullptr) && (portable_code == nullptr)) {
+ if (quick_code == nullptr) {
// No code: need interpreter.
// May return true for native code, in the case of generic JNI
// DCHECK(!method->IsNative());
return true;
}
-#ifdef ART_SEA_IR_MODE
- ScopedObjectAccess soa(Thread::Current());
- if (std::string::npos != PrettyMethod(method).find("fibonacci")) {
- LOG(INFO) << "Found " << PrettyMethod(method);
- return false;
- }
-#endif
// If interpreter mode is enabled, every method (except native and proxy) must
// be run with interpreter.
return Runtime::Current()->GetInstrumentation()->InterpretOnly() &&
@@ -2644,37 +2602,22 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
// Only update static methods.
continue;
}
- const void* portable_code = nullptr;
const void* quick_code = nullptr;
if (has_oat_class) {
OatFile::OatMethod oat_method = oat_class.GetOatMethod(method_index);
- portable_code = oat_method.GetPortableCode();
quick_code = oat_method.GetQuickCode();
}
- const bool enter_interpreter = NeedsInterpreter(method, quick_code, portable_code);
- bool have_portable_code = false;
+ const bool enter_interpreter = NeedsInterpreter(method, quick_code);
if (enter_interpreter) {
// Use interpreter entry point.
// Check whether the method is native, in which case it's generic JNI.
- if (quick_code == nullptr && portable_code == nullptr && method->IsNative()) {
+ if (quick_code == nullptr && method->IsNative()) {
quick_code = GetQuickGenericJniStub();
- portable_code = GetPortableToQuickBridge();
} else {
- portable_code = GetPortableToInterpreterBridge();
quick_code = GetQuickToInterpreterBridge();
}
- } else {
- if (portable_code == nullptr) {
- portable_code = GetPortableToQuickBridge();
- } else {
- have_portable_code = true;
- }
- if (quick_code == nullptr) {
- quick_code = GetQuickToPortableBridge();
- }
}
- runtime->GetInstrumentation()->UpdateMethodsCode(method, quick_code, portable_code,
- have_portable_code);
+ runtime->GetInstrumentation()->UpdateMethodsCode(method, quick_code);
}
// Ignore virtual methods on the iterator.
}
@@ -2689,7 +2632,6 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
}
// Method shouldn't have already been linked.
DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
- DCHECK(method->GetEntryPointFromPortableCompiledCode() == nullptr);
if (oat_class != nullptr) {
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
@@ -2699,8 +2641,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
// Install entry point from interpreter.
bool enter_interpreter = NeedsInterpreter(method.Get(),
- method->GetEntryPointFromQuickCompiledCode(),
- method->GetEntryPointFromPortableCompiledCode());
+ method->GetEntryPointFromQuickCompiledCode());
if (enter_interpreter && !method->IsNative()) {
method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
} else {
@@ -2709,33 +2650,21 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
if (method->IsAbstract()) {
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
- method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
return;
}
- bool have_portable_code = false;
if (method->IsStatic() && !method->IsConstructor()) {
// For static methods excluding the class initializer, install the trampoline.
// It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
// after initializing class (see ClassLinker::InitializeClass method).
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
- method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionStub());
} else if (enter_interpreter) {
if (!method->IsNative()) {
// Set entry point from compiled code if there's no code or in interpreter only mode.
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
- method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
} else {
method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
- method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
}
- } else if (method->GetEntryPointFromPortableCompiledCode() != nullptr) {
- DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
- have_portable_code = true;
- method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
- } else {
- DCHECK(method->GetEntryPointFromQuickCompiledCode() != nullptr);
- method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
}
if (method->IsNative()) {
@@ -2753,9 +2682,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
// Allow instrumentation its chance to hijack code.
runtime->GetInstrumentation()->UpdateMethodsCode(method.Get(),
- method->GetEntryPointFromQuickCompiledCode(),
- method->GetEntryPointFromPortableCompiledCode(),
- have_portable_code);
+ method->GetEntryPointFromQuickCompiledCode());
}
@@ -3710,6 +3637,19 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
return false;
}
+ // We may be running with a preopted oat file but without image. In this case,
+ // we don't skip verification of preverified classes to ensure we initialize
+ // dex caches with all types resolved during verification.
+ // We need to trust image classes, as these might be coming out of a pre-opted, quickened boot
+ // image (that we just failed loading), and the verifier can't be run on quickened opcodes when
+ // the runtime isn't started. On the other hand, app classes can be re-verified even if they are
+ // already pre-opted, as then the runtime is started.
+ if (!Runtime::Current()->IsCompiler() &&
+ !Runtime::Current()->GetHeap()->HasImageSpace() &&
+ klass->GetClassLoader() != nullptr) {
+ return false;
+ }
+
uint16_t class_def_index = klass->GetDexClassDefIndex();
oat_file_class_status = oat_dex_file->GetOatClass(class_def_index).GetStatus();
if (oat_file_class_status == mirror::Class::kStatusVerified ||
@@ -4049,7 +3989,6 @@ mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
// At runtime the method looks like a reference and argument saving method, clone the code
// related parameters from this method.
method->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
- method->SetEntryPointFromPortableCompiledCode(GetPortableProxyInvokeHandler());
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
return method;
@@ -5775,8 +5714,7 @@ void ClassLinker::DumpAllClasses(int flags) {
}
}
-static OatFile::OatMethod CreateOatMethod(const void* code, bool is_portable) {
- CHECK_EQ(kUsePortableCompiler, is_portable);
+static OatFile::OatMethod CreateOatMethod(const void* code) {
CHECK(code != nullptr);
const uint8_t* base = reinterpret_cast<const uint8_t*>(code); // Base of data points at code.
base -= sizeof(void*); // Move backward so that code_offset != 0.
@@ -5784,21 +5722,11 @@ static OatFile::OatMethod CreateOatMethod(const void* code, bool is_portable) {
return OatFile::OatMethod(base, code_offset);
}
-bool ClassLinker::IsPortableResolutionStub(const void* entry_point) const {
- return (entry_point == GetPortableResolutionStub()) ||
- (portable_resolution_trampoline_ == entry_point);
-}
-
bool ClassLinker::IsQuickResolutionStub(const void* entry_point) const {
return (entry_point == GetQuickResolutionStub()) ||
(quick_resolution_trampoline_ == entry_point);
}
-bool ClassLinker::IsPortableToInterpreterBridge(const void* entry_point) const {
- return (entry_point == GetPortableToInterpreterBridge());
- // TODO: portable_to_interpreter_bridge_trampoline_ == entry_point;
-}
-
bool ClassLinker::IsQuickToInterpreterBridge(const void* entry_point) const {
return (entry_point == GetQuickToInterpreterBridge()) ||
(quick_to_interpreter_bridge_trampoline_ == entry_point);
@@ -5813,32 +5741,22 @@ const void* ClassLinker::GetRuntimeQuickGenericJniStub() const {
return GetQuickGenericJniStub();
}
-void ClassLinker::SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code,
- bool is_portable) const {
- OatFile::OatMethod oat_method = CreateOatMethod(method_code, is_portable);
+void ClassLinker::SetEntryPointsToCompiledCode(mirror::ArtMethod* method,
+ const void* method_code) const {
+ OatFile::OatMethod oat_method = CreateOatMethod(method_code);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
- // Create bridges to transition between different kinds of compiled bridge.
- if (method->GetEntryPointFromPortableCompiledCode() == nullptr) {
- method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
- } else {
- CHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
- method->SetEntryPointFromQuickCompiledCode(GetQuickToPortableBridge());
- method->SetIsPortableCompiled();
- }
}
void ClassLinker::SetEntryPointsToInterpreter(mirror::ArtMethod* method) const {
if (!method->IsNative()) {
method->SetEntryPointFromInterpreter(artInterpreterToInterpreterBridge);
- method->SetEntryPointFromPortableCompiledCode(GetPortableToInterpreterBridge());
method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
} else {
const void* quick_method_code = GetQuickGenericJniStub();
- OatFile::OatMethod oat_method = CreateOatMethod(quick_method_code, false);
+ OatFile::OatMethod oat_method = CreateOatMethod(quick_method_code);
oat_method.LinkMethod(method);
method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
- method->SetEntryPointFromPortableCompiledCode(GetPortableToQuickBridge());
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 132da675cf..6461835485 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -392,22 +392,16 @@ class ClassLinker {
// Get the oat code for a method when its class isn't yet initialized
const void* GetQuickOatCodeFor(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const void* GetPortableOatCodeFor(mirror::ArtMethod* method, bool* have_portable_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the oat code for a method from a method index.
const void* GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const void* GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get compiled code for a method, return null if no code
// exists. This is unlike Get..OatCodeFor which will return a bridge
// or interpreter entrypoint.
const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const void* GetOatMethodPortableCodeFor(mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
@@ -416,15 +410,9 @@ class ClassLinker {
static const char* GetClassRootDescriptor(ClassRoot class_root);
- // Is the given entry point portable code to run the resolution stub?
- bool IsPortableResolutionStub(const void* entry_point) const;
-
// Is the given entry point quick code to run the resolution stub?
bool IsQuickResolutionStub(const void* entry_point) const;
- // Is the given entry point portable code to bridge into the interpreter?
- bool IsPortableToInterpreterBridge(const void* entry_point) const;
-
// Is the given entry point quick code to bridge into the interpreter?
bool IsQuickToInterpreterBridge(const void* entry_point) const;
@@ -436,8 +424,7 @@ class ClassLinker {
}
// Set the entrypoints up for method to the given code.
- void SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code,
- bool is_portable) const
+ void SetEntryPointsToCompiledCode(mirror::ArtMethod* method, const void* method_code) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Set the entrypoints up for method to the enter the interpreter.
@@ -805,9 +792,7 @@ class ClassLinker {
// Trampolines within the image the bounce to runtime entrypoints. Done so that there is a single
// patch point within the image. TODO: make these proper relocations.
- const void* portable_resolution_trampoline_;
const void* quick_resolution_trampoline_;
- const void* portable_imt_conflict_trampoline_;
const void* quick_imt_conflict_trampoline_;
const void* quick_generic_jni_trampoline_;
const void* quick_to_interpreter_bridge_trampoline_;
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index bd0dbaaf24..1b568357f9 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -138,16 +138,6 @@ class CheckJniAbortCatcher {
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
-// TODO: These tests were disabled for portable when we went to having
-// MCLinker link LLVM ELF output because we no longer just have code
-// blobs in memory. We'll need to dlopen to load and relocate
-// temporary output to resurrect these tests.
-#define TEST_DISABLED_FOR_PORTABLE() \
- if (kUsePortableCompiler) { \
- printf("WARNING: TEST DISABLED FOR PORTABLE\n"); \
- return; \
- }
-
// TODO: When heap reference poisoning works with the compiler, get rid of this.
#define TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING() \
if (kPoisonHeapReferences) { \
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 846216c52d..f5b435400f 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -418,6 +418,10 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
break;
}
case Instruction::IGET_QUICK:
+ case Instruction::IGET_BOOLEAN_QUICK:
+ case Instruction::IGET_BYTE_QUICK:
+ case Instruction::IGET_CHAR_QUICK:
+ case Instruction::IGET_SHORT_QUICK:
case Instruction::IGET_WIDE_QUICK:
case Instruction::IGET_OBJECT_QUICK: {
// Since we replaced the field index, we ask the verifier to tell us which
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a9b70cbaa1..7cc52c3063 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4238,10 +4238,15 @@ class HeapChunkContext {
Reset();
}
- static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
+ static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
- reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
+ reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
+ }
+
+ static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
}
private:
@@ -4255,72 +4260,85 @@ class HeapChunkContext {
pieceLenField_ = nullptr;
}
- void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_) {
+ bool IsNative() const {
+ return type_ == CHUNK_TYPE("NHSG");
+ }
+
+ // Returns true if the object is not an empty chunk.
+ bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken
// in the following code not to allocate memory, by ensuring buf_ is of the correct size
if (used_bytes == 0) {
- if (start == nullptr) {
- // Reset for start of new heap.
- startOfNextMemoryChunk_ = nullptr;
- Flush();
- }
- // Only process in use memory so that free region information
- // also includes dlmalloc book keeping.
- return;
+ if (start == nullptr) {
+ // Reset for start of new heap.
+ startOfNextMemoryChunk_ = nullptr;
+ Flush();
+ }
+ // Only process in use memory so that free region information
+ // also includes dlmalloc book keeping.
+ return false;
}
-
- /* If we're looking at the native heap, we'll just return
- * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
- */
- bool native = type_ == CHUNK_TYPE("NHSG");
-
- // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
- // count gaps inbetween spaces as free memory.
if (startOfNextMemoryChunk_ != nullptr) {
- // Transmit any pending free memory. Native free memory of
- // over kMaxFreeLen could be because of the use of mmaps, so
- // don't report. If not free memory then start a new segment.
- bool flush = true;
- if (start > startOfNextMemoryChunk_) {
- const size_t kMaxFreeLen = 2 * kPageSize;
- void* freeStart = startOfNextMemoryChunk_;
- void* freeEnd = start;
- size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
- if (!native || freeLen < kMaxFreeLen) {
- AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
- flush = false;
- }
- }
- if (flush) {
- startOfNextMemoryChunk_ = nullptr;
- Flush();
+ // Transmit any pending free memory. Native free memory of over kMaxFreeLen could be because
+ // of the use of mmaps, so don't report. If not free memory then start a new segment.
+ bool flush = true;
+ if (start > startOfNextMemoryChunk_) {
+ const size_t kMaxFreeLen = 2 * kPageSize;
+ void* free_start = startOfNextMemoryChunk_;
+ void* free_end = start;
+ const size_t free_len =
+ reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
+ if (!IsNative() || free_len < kMaxFreeLen) {
+ AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
+ flush = false;
}
+ }
+ if (flush) {
+ startOfNextMemoryChunk_ = nullptr;
+ Flush();
+ }
}
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
+ return true;
+ }
- // Determine the type of this chunk.
- // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
- // If it's the same, we should combine them.
- uint8_t state = ExamineObject(obj, native);
- AppendChunk(state, start, used_bytes + chunk_overhead_);
- startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
+ void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (ProcessRecord(start, used_bytes)) {
+ uint8_t state = ExamineNativeObject(start);
+ AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
+ startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
+ }
}
- void AppendChunk(uint8_t state, void* ptr, size_t length)
+ void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ if (ProcessRecord(start, used_bytes)) {
+ // Determine the type of this chunk.
+ // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
+ // If it's the same, we should combine them.
+ uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
+ AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
+ startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
+ }
+ }
+
+ void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Make sure there's enough room left in the buffer.
// We need to use two bytes for every fractional 256 allocation units used by the chunk plus
// 17 bytes for any header.
- size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
- size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
- if (bytesLeft < needed) {
+ const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
+ size_t byte_left = &buf_.back() - p_;
+ if (byte_left < needed) {
+ if (is_native) {
+ // Cannot trigger memory allocation while walking native heap.
+ return;
+ }
Flush();
}
- bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
- if (bytesLeft < needed) {
+ byte_left = &buf_.back() - p_;
+ if (byte_left < needed) {
LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
<< needed << " bytes)";
return;
@@ -4338,43 +4356,34 @@ class HeapChunkContext {
*p_++ = length - 1;
}
- uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
+ uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
+ }
+
+ uint8_t ExamineJavaObject(mirror::Object* o)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == nullptr) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
-
// It's an allocated chunk. Figure out what it is.
-
- // If we're looking at the native heap, we'll just return
- // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
- if (is_native_heap) {
- return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
- }
-
- if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ if (!heap->IsLiveObjectLocked(o)) {
+ LOG(ERROR) << "Invalid object in managed heap: " << o;
return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
-
mirror::Class* c = o->GetClass();
if (c == nullptr) {
// The object was probably just created but hasn't been initialized yet.
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
-
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+ if (!heap->IsValidObjectAddress(c)) {
LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
}
-
if (c->IsClassClass()) {
return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
}
-
if (c->IsArrayClass()) {
- if (o->IsObjectArray()) {
- return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
- }
switch (c->GetComponentSize()) {
case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
@@ -4382,7 +4391,6 @@ class HeapChunkContext {
case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
}
}
-
return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
}
@@ -4401,41 +4409,33 @@ class HeapChunkContext {
static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
- HeapChunkContext::HeapChunkCallback(
+ HeapChunkContext::HeapChunkJavaCallback(
obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
}
void Dbg::DdmSendHeapSegments(bool native) {
- Dbg::HpsgWhen when;
- Dbg::HpsgWhat what;
- if (!native) {
- when = gDdmHpsgWhen;
- what = gDdmHpsgWhat;
- } else {
- when = gDdmNhsgWhen;
- what = gDdmNhsgWhat;
- }
+ Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
+ Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
if (when == HPSG_WHEN_NEVER) {
return;
}
-
// Figure out what kind of chunks we'll be sending.
- CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
+ CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
+ << static_cast<int>(what);
// First, send a heap start chunk.
uint8_t heap_id[4];
JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
-
Thread* self = Thread::Current();
-
Locks::mutator_lock_->AssertSharedHeld(self);
// Send a series of heap segment chunks.
- HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
+ HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
if (native) {
#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
- dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
+ dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context);
+ HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context); // Indicate end of a space.
#else
UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
#endif
@@ -4447,7 +4447,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
// dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
// allocation then the first sizeof(size_t) may belong to it.
context.SetChunkOverhead(sizeof(size_t));
- space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
} else if (space->IsRosAllocSpace()) {
context.SetChunkOverhead(0);
// Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
@@ -4457,7 +4457,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
tl->SuspendAll();
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
}
tl->ResumeAll();
self->TransitionFromSuspendedToRunnable();
@@ -4465,6 +4465,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+ HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
} else {
UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
}
@@ -4473,7 +4474,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Walk the large objects, these are not in the AllocSpace.
context.SetChunkOverhead(0);
- heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
}
// Finally, send a heap end chunk.
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index b304779568..0b54d47ce6 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -21,6 +21,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "dex_file-inl.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/dex_instruction_list.h b/runtime/dex_instruction_list.h
index 05214a4600..a90f424326 100644
--- a/runtime/dex_instruction_list.h
+++ b/runtime/dex_instruction_list.h
@@ -257,10 +257,10 @@
V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
- V(0xEF, UNUSED_EF, "unused-ef", k10x, false, kUnknown, 0, kVerifyError) \
- V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \
- V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \
- V(0xF2, UNUSED_F2, "unused-f2", k10x, false, kUnknown, 0, kVerifyError) \
+ V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
+ V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
V(0xF3, UNUSED_F3, "unused-f3", k10x, false, kUnknown, 0, kVerifyError) \
V(0xF4, UNUSED_F4, "unused-f4", k10x, false, kUnknown, 0, kVerifyError) \
V(0xF5, UNUSED_F5, "unused-f5", k10x, false, kUnknown, 0, kVerifyError) \
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 65972359e8..4198905e23 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -488,6 +488,20 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word,
return false;
}
+ // We'd also like to confirm a shstrtab in program_header_only_ mode (else Open() does this for
+ // us). This is usually the last in an oat file, and a good indicator of whether writing was
+ // successful (or the process crashed and left garbage).
+ if (program_header_only_) {
+ // It might not be mapped, but we can compare against the file size.
+ int64_t offset = static_cast<int64_t>(GetHeader().e_shoff +
+ (GetHeader().e_shstrndx * GetHeader().e_shentsize));
+ if (offset >= file_->GetLength()) {
+ *error_msg = StringPrintf("Shstrtab is not in the mapped ELF file: '%s'",
+ file_->GetPath().c_str());
+ return false;
+ }
+ }
+
return true;
}
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
index 3b47f245f7..28e19d414d 100644
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -15,6 +15,7 @@
*/
#include "class_linker.h"
+#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/object-inl.h"
@@ -47,13 +48,9 @@ extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::
}
}
uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
- if (kUsePortableCompiler) {
- InvokeWithShadowFrame(self, shadow_frame, arg_offset, result);
- } else {
- method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
- (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, method->GetShorty());
- }
+ method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
+ (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
+ result, method->GetShorty());
}
} // namespace art
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
deleted file mode 100644
index de95f7dfbc..0000000000
--- a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-static constexpr gc::AllocatorType kPortableAllocatorType =
- gc::kUseRosAlloc ? gc::kAllocatorTypeRosAlloc : gc::kAllocatorTypeDlMalloc;
-
-extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode<false, true>(type_idx, referrer, thread, kPortableAllocatorType);
-}
-
-extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode<true, true>(type_idx, referrer, thread, kPortableAllocatorType);
-}
-
-extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode<false, true>(type_idx, referrer, length, self,
- kPortableAllocatorType);
-}
-
-extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode<true, true>(type_idx, referrer, length, self,
- kPortableAllocatorType);
-}
-
-extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCodeInstrumented(type_idx, referrer, length, thread, false,
- kPortableAllocatorType);
-}
-
-extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCodeInstrumented(type_idx, referrer, length, thread, true,
- kPortableAllocatorType);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
deleted file mode 100644
index 151b1785c9..0000000000
--- a/runtime/entrypoints/portable/portable_cast_entrypoints.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "common_throws.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-extern "C" int32_t art_portable_is_assignable_from_code(mirror::Class* dest_type,
- mirror::Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type != NULL);
- DCHECK(src_type != NULL);
- return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
-}
-
-extern "C" void art_portable_check_cast_from_code(mirror::Class* dest_type,
- mirror::Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
- DCHECK(src_type->IsClass()) << PrettyClass(src_type);
- if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) {
- ThrowClassCastException(dest_type, src_type);
- }
-}
-
-extern "C" void art_portable_check_put_array_element_from_code(mirror::Object* element,
- mirror::Object* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (element == NULL) {
- return;
- }
- DCHECK(array != NULL);
- mirror::Class* array_class = array->GetClass();
- DCHECK(array_class != NULL);
- mirror::Class* component_type = array_class->GetComponentType();
- mirror::Class* element_class = element->GetClass();
- if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
- ThrowArrayStoreException(element_class, array_class);
- }
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
deleted file mode 100644
index 9364c46abf..0000000000
--- a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "gc/accounting/card_table-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
-}
-
-extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
-}
-
-extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Called when caller isn't guaranteed to have access to a type and the dex cache may be
- // unpopulated
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
-}
-
-extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::ArtMethod* referrer,
- uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveStringFromCode(referrer, string_idx);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
deleted file mode 100644
index 6f77e1c42b..0000000000
--- a/runtime/entrypoints/portable/portable_entrypoints.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
-#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
-
-#include "dex_file-inl.h"
-#include "runtime.h"
-
-namespace art {
-namespace mirror {
- class ArtMethod;
- class Object;
-} // namespace mirror
-class Thread;
-
-#define PORTABLE_ENTRYPOINT_OFFSET(ptr_size, x) \
- Thread::PortableEntryPointOffset<ptr_size>(OFFSETOF_MEMBER(PortableEntryPoints, x))
-
-// Pointers to functions that are called by code generated by compiler's adhering to the portable
-// compiler ABI.
-struct PACKED(4) PortableEntryPoints {
- // Invocation
- void (*pPortableImtConflictTrampoline)(mirror::ArtMethod*);
- void (*pPortableResolutionTrampoline)(mirror::ArtMethod*);
- void (*pPortableToInterpreterBridge)(mirror::ArtMethod*);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc
deleted file mode 100644
index 371aca4b2d..0000000000
--- a/runtime/entrypoints/portable/portable_field_entrypoints.cc
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx,
- referrer,
- StaticPrimitiveWrite,
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode<StaticObjectWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- return 0;
-}
-
-extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- return 0;
-}
-
-extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- field = FindFieldFromCode<StaticObjectRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- return 0;
-}
-
-extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj, uint32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set32<false>(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->Set64<false>(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj,
- mirror::Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode<InstanceObjectWrite, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- // Compiled code can't use transactional mode.
- field->SetObj<false>(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- return 0;
-}
-
-extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- return 0;
-}
-
-extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx,
- mirror::ArtMethod* referrer,
- mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- field = FindFieldFromCode<InstanceObjectRead, true>(field_idx, referrer, Thread::Current(),
- sizeof(mirror::HeapReference<mirror::Object>));
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- return 0;
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
deleted file mode 100644
index afe769e5ec..0000000000
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_instruction.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "mirror/art_method-inl.h"
-
-namespace art {
-
-extern "C" void art_portable_fill_array_data_from_code(mirror::ArtMethod* method,
- uint32_t dex_pc,
- mirror::Array* array,
- uint32_t payload_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(dex_pc);
- const DexFile::CodeItem* code_item = method->GetCodeItem();
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
- FillArrayData(array, payload);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
deleted file mode 100644
index 6f9c083c80..0000000000
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/dex_cache-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-template<InvokeType type, bool access_check>
-mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
- mirror::ArtMethod* caller_method, Thread* self) {
- mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
- access_check, type);
- if (UNLIKELY(method == NULL)) {
- // Note: This can cause thread suspension.
- self->AssertThreadSuspensionIsAllowable();
- method = FindMethodFromCode<type, access_check>(method_idx, &this_object, &caller_method,
- self);
- if (UNLIKELY(method == NULL)) {
- CHECK(self->IsExceptionPending());
- return 0; // failure
- }
- }
- DCHECK(!self->IsExceptionPending());
- const void* code = method->GetEntryPointFromPortableCompiledCode();
-
- // When we return, the caller will branch to this address, so it had better not be 0!
- if (UNLIKELY(code == NULL)) {
- LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
- << " location: " << method->GetDexFile()->GetLocation();
- }
- return method;
-}
-
-// Explicit template declarations of FindMethodHelper for all invoke types.
-#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
- mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx, \
- mirror::Object* this_object, \
- mirror::ArtMethod* caller_method, \
- Thread* thread)
-#define EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(_type) \
- EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, false); \
- EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, true)
-
-EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kStatic);
-EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kDirect);
-EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kVirtual);
-EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kSuper);
-EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(kInterface);
-
-#undef EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL
-#undef EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL
-
-extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kStatic, true>(method_idx, this_object, referrer, thread);
-}
-
-extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kDirect, true>(method_idx, this_object, referrer, thread);
-}
-
-extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kVirtual, true>(method_idx, this_object, referrer, thread);
-}
-
-extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kSuper, true>(method_idx, this_object, referrer, thread);
-}
-
-extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kInterface, true>(method_idx, this_object, referrer, thread);
-}
-
-extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
- mirror::Object* this_object,
- mirror::ArtMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper<kInterface, false>(method_idx, this_object, referrer, thread);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc
deleted file mode 100644
index 0d0f21b795..0000000000
--- a/runtime/entrypoints/portable/portable_jni_entrypoints.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-#include "thread-inl.h"
-
-namespace art {
-
-// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
-extern "C" uint32_t art_portable_jni_method_start(Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) {
- JNIEnvExt* env = self->GetJniEnv();
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
- env->local_ref_cookie = env->locals.GetSegmentState();
- self->TransitionFromRunnableToSuspended(kNative);
- return saved_local_ref_cookie;
-}
-
-extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS {
- self->DecodeJObject(to_lock)->MonitorEnter(self);
- return art_portable_jni_method_start(self);
-}
-
-static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- JNIEnvExt* env = self->GetJniEnv();
- env->locals.SetSegmentState(env->local_ref_cookie);
- env->local_ref_cookie = saved_local_ref_cookie;
-}
-
-extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-
-extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result,
- uint32_t saved_local_ref_cookie,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
- uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- mirror::Object* o = self->DecodeJObject(result);
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc
deleted file mode 100644
index fcd3e9d49e..0000000000
--- a/runtime/entrypoints/portable/portable_lock_entrypoints.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
- DCHECK(obj != nullptr); // Assumed to have been checked before entry.
- obj->MonitorEnter(thread); // May block.
- DCHECK(thread->HoldsLock(obj));
- // Only possible exception is NPE and is handled before entry.
- DCHECK(!thread->IsExceptionPending());
-}
-
-extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
- DCHECK(obj != nullptr); // Assumed to have been checked before entry.
- // MonitorExit may throw exception.
- obj->MonitorExit(thread);
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
deleted file mode 100644
index 95ac66cbec..0000000000
--- a/runtime/entrypoints/portable/portable_thread_entrypoints.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mirror/art_method-inl.h"
-#include "verifier/dex_gc_map.h"
-#include "stack.h"
-#include "thread-inl.h"
-
-namespace art {
-
-class ShadowFrameCopyVisitor : public StackVisitor {
- public:
- explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
- top_frame_(NULL) {}
-
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (IsShadowFrame()) {
- ShadowFrame* cur_frame = GetCurrentShadowFrame();
- size_t num_regs = cur_frame->NumberOfVRegs();
- mirror::ArtMethod* method = cur_frame->GetMethod();
- uint32_t dex_pc = cur_frame->GetDexPC();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
-
- const uint8_t* gc_map = method->GetNativeGcMap(sizeof(void*));
- verifier::DexPcToReferenceMap dex_gc_map(gc_map);
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- if (TestBitmap(reg, reg_bitmap)) {
- new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg));
- } else {
- new_frame->SetVReg(reg, cur_frame->GetVReg(reg));
- }
- }
-
- if (prev_frame_ != NULL) {
- prev_frame_->SetLink(new_frame);
- } else {
- top_frame_ = new_frame;
- }
- prev_frame_ = new_frame;
- }
- return true;
- }
-
- ShadowFrame* GetShadowFrameCopy() {
- return top_frame_;
- }
-
- private:
- static bool TestBitmap(int reg, const uint8_t* reg_vector) {
- return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
- }
-
- ShadowFrame* prev_frame_;
- ShadowFrame* top_frame_;
-};
-
-extern "C" void art_portable_test_suspend_from_code(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- self->CheckSuspend();
- if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
- // Save out the shadow frame to the heap
- ShadowFrameCopyVisitor visitor(self);
- visitor.WalkStack(true);
- self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
- self->SetDeoptimizationReturnValue(JValue());
- self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
- }
-}
-
-extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
- ShadowFrame* new_shadow_frame,
- mirror::ArtMethod* method,
- uint32_t num_vregs) {
- ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
- new_shadow_frame->SetMethod(method);
- new_shadow_frame->SetNumberOfVRegs(num_vregs);
- return old_frame;
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
deleted file mode 100644
index 431735803d..0000000000
--- a/runtime/entrypoints/portable/portable_throw_entrypoints.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "dex_instruction.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-
-namespace art {
-
-extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArithmeticExceptionDivideByZero();
-}
-
-extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArrayIndexOutOfBoundsException(index, length);
-}
-
-extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowNoSuchMethodError(method_idx);
-}
-
-extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: remove dex_pc argument from caller.
- UNUSED(dex_pc);
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionFromDexPC(throw_location);
-}
-
-extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowStackOverflowError(Thread::Current());
-}
-
-extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- if (exception == NULL) {
- ThrowNullPointerException(NULL, "throw with null exception");
- } else {
- self->SetException(throw_location, exception);
- }
-}
-
-extern "C" void* art_portable_get_and_clear_exception(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(self->IsExceptionPending());
- // TODO: make this inline.
- mirror::Throwable* exception = self->GetException(NULL);
- self->ClearException();
- return exception;
-}
-
-extern "C" int32_t art_portable_find_catch_block_from_code(mirror::ArtMethod* current_method,
- uint32_t ti_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current(); // TODO: make an argument.
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- // Check for special deoptimization exception.
- if (UNLIKELY(reinterpret_cast<intptr_t>(exception) == -1)) {
- return -1;
- }
- mirror::Class* exception_type = exception->GetClass();
- StackHandleScope<1> hs(self);
- const DexFile::CodeItem* code_item = current_method->GetCodeItem();
- DCHECK_LT(ti_offset, code_item->tries_size_);
- const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset);
-
- int iter_index = 0;
- int result = -1;
- uint32_t catch_dex_pc = -1;
- // Iterate over the catch handlers associated with dex_pc
- for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) {
- uint16_t iter_type_idx = it.GetHandlerTypeIndex();
- // Catch all case
- if (iter_type_idx == DexFile::kDexNoIndex16) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- // Does this catch exception type apply?
- mirror::Class* iter_exception_type =
- current_method->GetDexCacheResolvedType(iter_type_idx);
- if (UNLIKELY(iter_exception_type == NULL)) {
- // TODO: check, the verifier (class linker?) should take care of resolving all exception
- // classes early.
- LOG(WARNING) << "Unresolved exception class when finding catch block: "
- << current_method->GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- ++iter_index;
- }
- if (result != -1) {
- // Handler found.
- Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(
- self, throw_location, current_method, catch_dex_pc, exception);
- // If the catch block has no move-exception then clear the exception for it.
- const Instruction* first_catch_instr = Instruction::At(
- &current_method->GetCodeItem()->insns_[catch_dex_pc]);
- if (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION) {
- self->ClearException();
- }
- }
- return result;
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
deleted file mode 100644
index 2a2771f31c..0000000000
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
-#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
-
-#include "dex_instruction-inl.h"
-#include "entrypoints/entrypoint_utils-inl.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "interpreter/interpreter.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/object-inl.h"
-#include "scoped_thread_state_change.h"
-
-namespace art {
-
-class ShortyHelper {
- public:
- ShortyHelper(const char* shorty, uint32_t shorty_len, bool is_static)
- : shorty_(shorty), shorty_len_(shorty_len), is_static_(is_static) {
- }
-
- const char* GetShorty() const {
- return shorty_;
- }
-
- uint32_t GetShortyLength() const {
- return shorty_len_;
- }
-
- size_t NumArgs() const {
- // "1 +" because the first in Args is the receiver.
- // "- 1" because we don't count the return type.
- return (is_static_ ? 0 : 1) + GetShortyLength() - 1;
- }
-
- // Get the primitive type associated with the given parameter.
- Primitive::Type GetParamPrimitiveType(size_t param) const {
- CHECK_LT(param, NumArgs());
- if (is_static_) {
- param++; // 0th argument must skip return value at start of the shorty.
- } else if (param == 0) {
- return Primitive::kPrimNot;
- }
- return Primitive::GetType(shorty_[param]);
- }
-
- // Is the specified parameter a long or double, where parameter 0 is 'this' for instance methods.
- bool IsParamALongOrDouble(size_t param) const {
- Primitive::Type type = GetParamPrimitiveType(param);
- return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
- }
-
- // Is the specified parameter a reference, where parameter 0 is 'this' for instance methods.
- bool IsParamAReference(size_t param) const {
- return GetParamPrimitiveType(param) == Primitive::kPrimNot;
- }
-
- private:
- const char* const shorty_;
- const uint32_t shorty_len_;
- const bool is_static_;
-
- DISALLOW_COPY_AND_ASSIGN(ShortyHelper);
-};
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class PortableArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define PORTABLE_STACK_ARG_SKIP 0
-#elif defined(__mips__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define PORTABLE_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-// For x86 there are no register arguments and the stack pointer will point directly to the called
-// method argument passed by the caller.
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define PORTABLE_STACK_ARG_SKIP 4
-#elif defined(__x86_64__)
-// TODO: implement and check these.
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 16
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 96
-#define PORTABLE_STACK_ARG_SKIP 0
-#else
-// TODO: portable should be disabled for aarch64 for now.
-// #error "Unsupported architecture"
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define PORTABLE_STACK_ARG_SKIP 0
-#endif
-
- PortableArgumentVisitor(ShortyHelper& caller_mh, mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- caller_mh_(caller_mh),
- args_in_regs_(ComputeArgsInRegs(caller_mh)),
- num_params_(caller_mh.NumArgs()),
- reg_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
- stack_args_(reinterpret_cast<uint8_t*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
- + PORTABLE_STACK_ARG_SKIP),
- cur_args_(reg_args_),
- cur_arg_index_(0),
- param_index_(0) {
- }
-
- virtual ~PortableArgumentVisitor() {}
-
- virtual void Visit() = 0;
-
- bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamAReference(param_index_);
- }
-
- bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamALongOrDouble(param_index_);
- }
-
- Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.GetParamPrimitiveType(param_index_);
- }
-
- uint8_t* GetParamAddress() const {
- return cur_args_ + (cur_arg_index_ * sizeof(void*));
- }
-
- void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
- break;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- cur_args_ = stack_args_;
- cur_arg_index_ = 0;
- while (param_index_ < num_params_) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
- cur_arg_index_++;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- }
-
- private:
- static size_t ComputeArgsInRegs(ShortyHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if (defined(__i386__))
- UNUSED(mh);
- return 0;
-#else
- size_t args_in_regs = 0;
- size_t num_params = mh.NumArgs();
- for (size_t i = 0; i < num_params; i++) {
- args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
- if (args_in_regs > 3) {
- args_in_regs = 3;
- break;
- }
- }
- return args_in_regs;
-#endif
- }
- ShortyHelper& caller_mh_;
- const size_t args_in_regs_;
- const size_t num_params_;
- uint8_t* const reg_args_;
- uint8_t* const stack_args_;
- uint8_t* cur_args_;
- size_t cur_arg_index_;
- size_t param_index_;
-};
-
-// Visits arguments on the stack placing them into the shadow frame.
-class BuildPortableShadowFrameVisitor : public PortableArgumentVisitor {
- public:
- BuildPortableShadowFrameVisitor(ShortyHelper& caller_mh, mirror::ArtMethod** sp,
- ShadowFrame& sf, size_t first_arg_reg) :
- PortableArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) { }
- virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
- ++cur_reg_;
- break;
- case Primitive::kPrimNot:
- sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
- ++cur_reg_;
- }
-
- private:
- ShadowFrame& sf_;
- size_t cur_reg_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildPortableShadowFrameVisitor);
-};
-
-extern "C" uint64_t artPortableToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
- mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Ensure we don't get thread suspension until the object arguments are safely in the shadow
- // frame.
- // FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
-
- if (method->IsAbstract()) {
- ThrowAbstractMethodError(method);
- return 0;
- } else {
- const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
- StackHandleScope<2> hs(self);
- uint32_t shorty_len;
- const char* shorty = method->GetShorty(&shorty_len);
- ShortyHelper mh(shorty, shorty_len, method->IsStatic());
- const DexFile::CodeItem* code_item = method->GetCodeItem();
- uint16_t num_regs = code_item->registers_size_;
- void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
- ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick.
- method, 0, memory));
- size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
- BuildPortableShadowFrameVisitor shadow_frame_builder(mh, sp,
- *shadow_frame, first_arg_reg);
- shadow_frame_builder.VisitArguments();
- // Push a transition back into managed code onto the linked list in thread.
- ManagedStack fragment;
- self->PushManagedStackFragment(&fragment);
- self->PushShadowFrame(shadow_frame);
- self->EndAssertNoThreadSuspension(old_cause);
-
- if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
- // Ensure static method's class is initialized.
- Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
- if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
- DCHECK(Thread::Current()->IsExceptionPending());
- self->PopManagedStackFragment(fragment);
- return 0;
- }
- }
-
- JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
- // Pop transition.
- self->PopManagedStackFragment(fragment);
- return result.GetJ();
- }
-}
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
-class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
- public:
- BuildPortableArgumentVisitor(ShortyHelper& caller_mh, mirror::ArtMethod** sp,
- ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
- PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
- virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- jvalue val;
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimNot: {
- mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
- val.l = soa_.AddLocalReference<jobject>(obj);
- break;
- }
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- val.j = *reinterpret_cast<jlong*>(GetParamAddress());
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- val.i = *reinterpret_cast<jint*>(GetParamAddress());
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- UNREACHABLE();
- }
- args_.push_back(val);
- }
-
- private:
- ScopedObjectAccessUnchecked& soa_;
- std::vector<jvalue>& args_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
-};
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artPortableProxyInvokeHandler(mirror::ArtMethod* proxy_method,
- mirror::Object* receiver,
- Thread* self, mirror::ArtMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- self->VerifyStack();
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
- // Create local ref. copies of proxy method and the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Placing arguments into args vector and remove the receiver.
- uint32_t shorty_len;
- const char* shorty = proxy_method->GetShorty(&shorty_len);
- ShortyHelper proxy_mh(shorty, shorty_len, false);
- std::vector<jvalue> args;
- BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
- local_ref_visitor.VisitArguments();
- args.erase(args.begin());
-
- // Convert proxy method into expected interface method.
- mirror::ArtMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
- // that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
- JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
- rcvr_jobj, interface_method_jobj, args);
- return result.GetJ();
-}
-
-// Lazily resolve a method for portable. Called by stub code.
-extern "C" const void* artPortableResolutionTrampoline(mirror::ArtMethod* called,
- mirror::Object* receiver,
- Thread* self,
- mirror::ArtMethod** called_addr)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t dex_pc;
- mirror::ArtMethod* caller = self->GetCurrentMethod(&dex_pc);
-
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- InvokeType invoke_type;
- bool is_range;
- if (called->IsRuntimeMethod()) {
- const DexFile::CodeItem* code = caller->GetCodeItem();
- CHECK_LT(dex_pc, code->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- switch (instr_code) {
- case Instruction::INVOKE_DIRECT:
- invoke_type = kDirect;
- is_range = false;
- break;
- case Instruction::INVOKE_DIRECT_RANGE:
- invoke_type = kDirect;
- is_range = true;
- break;
- case Instruction::INVOKE_STATIC:
- invoke_type = kStatic;
- is_range = false;
- break;
- case Instruction::INVOKE_STATIC_RANGE:
- invoke_type = kStatic;
- is_range = true;
- break;
- case Instruction::INVOKE_SUPER:
- invoke_type = kSuper;
- is_range = false;
- break;
- case Instruction::INVOKE_SUPER_RANGE:
- invoke_type = kSuper;
- is_range = true;
- break;
- case Instruction::INVOKE_VIRTUAL:
- invoke_type = kVirtual;
- is_range = false;
- break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
- invoke_type = kVirtual;
- is_range = true;
- break;
- case Instruction::INVOKE_INTERFACE:
- invoke_type = kInterface;
- is_range = false;
- break;
- case Instruction::INVOKE_INTERFACE_RANGE:
- invoke_type = kInterface;
- is_range = true;
- break;
- default:
- LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
- // Avoid used uninitialized warnings.
- invoke_type = kDirect;
- is_range = true;
- }
- uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
- called = class_linker->ResolveMethod(Thread::Current(), dex_method_idx, &caller, invoke_type);
- // Incompatible class change should have been handled in resolve method.
- CHECK(!called->CheckIncompatibleClassChange(invoke_type));
- // Refine called method based on receiver.
- if (invoke_type == kVirtual) {
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
- } else if (invoke_type == kInterface) {
- called = receiver->GetClass()->FindVirtualMethodForInterface(called);
- }
- } else {
- CHECK(called->IsStatic()) << PrettyMethod(called);
- invoke_type = kStatic;
- // Incompatible class change should have been handled in resolve method.
- CHECK(!called->CheckIncompatibleClassChange(invoke_type));
- }
- const void* code = nullptr;
- if (LIKELY(!self->IsExceptionPending())) {
- // Ensure that the called method's class is initialized.
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
- class_linker->EnsureInitialized(self, called_class, true, true);
- if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromPortableCompiledCode();
- // TODO: remove this after we solve the link issue.
- if (code == nullptr) {
- bool have_portable_code;
- code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
- }
- } else if (called_class->IsInitializing()) {
- if (invoke_type == kStatic) {
- // Class is still initializing, go to oat and grab code (trampoline must be left in place
- // until class is initialized to stop races between threads).
- bool have_portable_code;
- code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
- } else {
- // No trampoline for non-static methods.
- code = called->GetEntryPointFromPortableCompiledCode();
- // TODO: remove this after we solve the link issue.
- if (code == nullptr) {
- bool have_portable_code;
- code = class_linker->GetPortableOatCodeFor(called, &have_portable_code);
- }
- }
- } else {
- DCHECK(called_class->IsErroneous());
- }
- }
- if (LIKELY(code != nullptr)) {
- // Expect class to at least be initializing.
- DCHECK(called->GetDeclaringClass()->IsInitializing());
- // Don't want infinite recursion.
- DCHECK(!class_linker->IsPortableResolutionStub(code));
- // Set up entry into main method
- *called_addr = called;
- }
- return code;
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index cb816298ad..9db1646f5f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -208,6 +208,22 @@ class QuickArgumentVisitor {
#endif
public:
+ // Special handling for proxy methods. Proxy methods are instance methods so the
+ // 'this' object is the 1st argument. They also have the same frame layout as the
+ // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
+ // 1st GPR.
+ static mirror::Object* GetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(sp->AsMirrorPtr()->IsProxyMethod());
+ CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, sp->AsMirrorPtr()->GetFrameSizeInBytes());
+ CHECK_GT(kNumQuickGprArgs, 0u);
+ constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
+ size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
+ GprIndexToGprOffset(kThisGprIndex);
+ uint8_t* this_arg_address = reinterpret_cast<uint8_t*>(sp) + this_arg_offset;
+ return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
+ }
+
static mirror::ArtMethod* GetCallingMethod(StackReference<mirror::ArtMethod>* sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(sp->AsMirrorPtr()->IsCalleeSaveMethod());
@@ -435,6 +451,13 @@ class QuickArgumentVisitor {
bool is_split_long_or_double_;
};
+// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
+// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
+extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return QuickArgumentVisitor::GetProxyThisObject(sp);
+}
+
// Visits arguments on the stack placing them into the shadow frame.
class BuildQuickShadowFrameVisitor FINAL : public QuickArgumentVisitor {
public:
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index db36a73956..420e8db7c8 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -28,66 +28,30 @@ static inline const void* GetJniDlsymLookupStub() {
return reinterpret_cast<const void*>(art_jni_dlsym_lookup_stub);
}
-// Return the address of portable stub code for handling IMT conflicts.
-extern "C" void art_portable_imt_conflict_trampoline(mirror::ArtMethod*);
-static inline const void* GetPortableImtConflictStub() {
- return reinterpret_cast<const void*>(art_portable_imt_conflict_trampoline);
-}
-
// Return the address of quick stub code for handling IMT conflicts.
extern "C" void art_quick_imt_conflict_trampoline(mirror::ArtMethod*);
static inline const void* GetQuickImtConflictStub() {
return reinterpret_cast<const void*>(art_quick_imt_conflict_trampoline);
}
-// Return the address of portable stub code for bridging from portable code to the interpreter.
-extern "C" void art_portable_to_interpreter_bridge(mirror::ArtMethod*);
-static inline const void* GetPortableToInterpreterBridge() {
- return reinterpret_cast<const void*>(art_portable_to_interpreter_bridge);
-}
-
// Return the address of quick stub code for bridging from quick code to the interpreter.
extern "C" void art_quick_to_interpreter_bridge(mirror::ArtMethod*);
static inline const void* GetQuickToInterpreterBridge() {
return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
}
-// Return the address of portable stub code for bridging from portable code to quick.
-static inline const void* GetPortableToQuickBridge() {
- // TODO: portable to quick bridge. Bug: 8196384
- return GetPortableToInterpreterBridge();
-}
-
-// Return the address of quick stub code for bridging from quick code to portable.
-static inline const void* GetQuickToPortableBridge() {
- // TODO: quick to portable bridge. Bug: 8196384
- return GetQuickToInterpreterBridge();
-}
-
// Return the address of quick stub code for handling JNI calls.
extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*);
static inline const void* GetQuickGenericJniStub() {
return reinterpret_cast<const void*>(art_quick_generic_jni_trampoline);
}
-// Return the address of portable stub code for handling transitions into the proxy invoke handler.
-extern "C" void art_portable_proxy_invoke_handler();
-static inline const void* GetPortableProxyInvokeHandler() {
- return reinterpret_cast<const void*>(art_portable_proxy_invoke_handler);
-}
-
// Return the address of quick stub code for handling transitions into the proxy invoke handler.
extern "C" void art_quick_proxy_invoke_handler();
static inline const void* GetQuickProxyInvokeHandler() {
return reinterpret_cast<const void*>(art_quick_proxy_invoke_handler);
}
-// Return the address of portable stub code for resolving a method at first call.
-extern "C" void art_portable_resolution_trampoline(mirror::ArtMethod*);
-static inline const void* GetPortableResolutionStub() {
- return reinterpret_cast<const void*>(art_portable_resolution_trampoline);
-}
-
// Return the address of quick stub code for resolving a method at first call.
extern "C" void art_quick_resolution_trampoline(mirror::ArtMethod*);
static inline const void* GetQuickResolutionStub() {
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index cfd2a3d0d0..13132632bb 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -150,17 +150,6 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
+ sizeof(void*) == sizeof(JniEntryPoints), JniEntryPoints_all);
}
- void CheckPortableEntryPoints() {
- CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableImtConflictTrampoline) == 0,
- PortableEntryPoints_start_with_imt);
- EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableImtConflictTrampoline,
- pPortableResolutionTrampoline, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(PortableEntryPoints, pPortableResolutionTrampoline,
- pPortableToInterpreterBridge, sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(PortableEntryPoints, pPortableToInterpreterBridge)
- + sizeof(void*) == sizeof(PortableEntryPoints), PortableEntryPoints_all);
- }
-
void CheckQuickEntryPoints() {
CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pAllocArray) == 0,
QuickEntryPoints_start_with_allocarray);
@@ -296,10 +285,6 @@ TEST_F(EntrypointsOrderTest, JniEntryPoints) {
CheckJniEntryPoints();
}
-TEST_F(EntrypointsOrderTest, PortableEntryPoints) {
- CheckPortableEntryPoints();
-}
-
TEST_F(EntrypointsOrderTest, QuickEntryPoints) {
CheckQuickEntryPoints();
}
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 580b541c0d..1770658c0e 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -19,6 +19,7 @@
#include "class_linker.h"
#include "common_runtime_test.h"
#include "dex_file.h"
+#include "dex_file-inl.h"
#include "gtest/gtest.h"
#include "leb128.h"
#include "mirror/class-inl.h"
@@ -174,60 +175,41 @@ TEST_F(ExceptionTest, StackTraceElement) {
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
- if (!kUsePortableCompiler) {
- // Create three fake stack frames with mapping data created in SetUp. We map offset 3 in the
- // code to dex pc 3.
- const uint32_t dex_pc = 3;
+ // Create three fake stack frames with mapping data created in SetUp. We map offset 3 in the
+ // code to dex pc 3.
+ const uint32_t dex_pc = 3;
- // Create the stack frame for the callee save method, expected by the runtime.
- fake_stack.push_back(reinterpret_cast<uintptr_t>(save_method));
- for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t);
- i += sizeof(uintptr_t)) {
- fake_stack.push_back(0);
- }
-
- fake_stack.push_back(method_g_->ToNativeQuickPc(dex_pc)); // return pc
-
- // Create/push fake 16byte stack frame for method g
- fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
+ // Create the stack frame for the callee save method, expected by the runtime.
+ fake_stack.push_back(reinterpret_cast<uintptr_t>(save_method));
+ for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t);
+ i += sizeof(uintptr_t)) {
fake_stack.push_back(0);
- fake_stack.push_back(0);
- fake_stack.push_back(method_f_->ToNativeQuickPc(dex_pc)); // return pc
+ }
- // Create/push fake 16byte stack frame for method f
- fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
- fake_stack.push_back(0);
- fake_stack.push_back(0);
- fake_stack.push_back(0xEBAD6070); // return pc
+ fake_stack.push_back(method_g_->ToNativeQuickPc(dex_pc)); // return pc
- // Push Method* of NULL to terminate the trace
- fake_stack.push_back(0);
+ // Create/push fake 16byte stack frame for method g
+ fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
+ fake_stack.push_back(0);
+ fake_stack.push_back(0);
+ fake_stack.push_back(method_f_->ToNativeQuickPc(dex_pc)); // return pc
- // Push null values which will become null incoming arguments.
- fake_stack.push_back(0);
- fake_stack.push_back(0);
- fake_stack.push_back(0);
+ // Create/push fake 16byte stack frame for method f
+ fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
+ fake_stack.push_back(0);
+ fake_stack.push_back(0);
+ fake_stack.push_back(0xEBAD6070); // return pc
- // Set up thread to appear as if we called out of method_g_ at pc dex 3
- thread->SetTopOfStack(reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]));
- } else {
- // Create/push fake 20-byte shadow frame for method g
- fake_stack.push_back(0);
- fake_stack.push_back(0);
- fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
- fake_stack.push_back(3);
- fake_stack.push_back(0);
+ // Push Method* of NULL to terminate the trace
+ fake_stack.push_back(0);
- // Create/push fake 20-byte shadow frame for method f
- fake_stack.push_back(0);
- fake_stack.push_back(0);
- fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
- fake_stack.push_back(3);
- fake_stack.push_back(0);
+ // Push null values which will become null incoming arguments.
+ fake_stack.push_back(0);
+ fake_stack.push_back(0);
+ fake_stack.push_back(0);
- thread->PushShadowFrame(reinterpret_cast<ShadowFrame*>(&fake_stack[5]));
- thread->PushShadowFrame(reinterpret_cast<ShadowFrame*>(&fake_stack[0]));
- }
+ // Set up thread to appear as if we called out of method_g_ at pc dex 3
+ thread->SetTopOfStack(reinterpret_cast<StackReference<mirror::ArtMethod>*>(&fake_stack[0]));
jobject internal = thread->CreateInternalStackTrace<false>(soa);
ASSERT_TRUE(internal != nullptr);
@@ -253,12 +235,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
EXPECT_STREQ("f", trace_array->Get(1)->GetMethodName()->ToModifiedUtf8().c_str());
EXPECT_EQ(22, trace_array->Get(1)->GetLineNumber());
- if (!kUsePortableCompiler) {
- thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach.
- } else {
- thread->PopShadowFrame();
- thread->PopShadowFrame();
- }
+ thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach.
}
} // namespace art
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index feb9565ccf..f5d3b47d49 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -17,6 +17,7 @@
#include "space_bitmap-inl.h"
#include "base/stringprintf.h"
+#include "dex_file-inl.h"
#include "mem_map.h"
#include "mirror/object-inl.h"
#include "mirror/class.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0fd0a9ff52..d420500592 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -30,6 +30,7 @@
#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
+#include "dex_file-inl.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
@@ -76,8 +77,6 @@ namespace gc {
static constexpr size_t kCollectorTransitionStressIterations = 0;
static constexpr size_t kCollectorTransitionStressWait = 10 * 1000; // Microseconds
-static constexpr bool kGCALotMode = false;
-static constexpr size_t kGcAlotInterval = KB;
// Minimum amount of remaining bytes before a concurrent GC is triggered.
static constexpr size_t kMinConcurrentRemainingBytes = 128 * KB;
static constexpr size_t kMaxConcurrentRemainingBytes = 512 * KB;
@@ -99,6 +98,15 @@ static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
static const char* kNonMovingSpaceName = "non moving space";
static const char* kZygoteSpaceName = "zygote space";
static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
+static constexpr bool kGCALotMode = false;
+// GC alot mode uses a small allocation stack to stress test a lot of GC.
+static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
+ sizeof(mirror::HeapReference<mirror::Object>);
+// Verify-object mode uses a small allocation stack size since searching the allocation stack is slow.
+static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
+ sizeof(mirror::HeapReference<mirror::Object>);
+static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
+ sizeof(mirror::HeapReference<mirror::Object>);
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, double foreground_heap_growth_multiplier,
@@ -134,6 +142,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
large_object_threshold_(large_object_threshold),
+ gc_request_pending_(false),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -165,8 +174,9 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
* verification is enabled, we limit the size of allocation stacks to speed up their
* searching.
*/
- max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
- : (kVerifyObjectSupport > kVerifyObjectModeFast) ? KB : MB),
+ max_allocation_stack_size_(kGCALotMode ? kGcAlotAllocationStackSize
+ : (kVerifyObjectSupport > kVerifyObjectModeFast) ? kVerifyObjectAllocationStackSize :
+ kDefaultAllocationStackSize),
current_allocator_(kAllocatorTypeDlMalloc),
current_non_moving_allocator_(kAllocatorTypeNonMoving),
bump_pointer_space_(nullptr),
@@ -400,6 +410,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ gc_request_lock_ = new Mutex("GC request lock");
+ gc_request_cond_.reset(new ConditionVariable("GC request condition variable", *gc_request_lock_));
heap_trim_request_lock_ = new Mutex("Heap trim request lock");
last_gc_size_ = GetBytesAllocated();
if (ignore_max_footprint_) {
@@ -3029,12 +3041,7 @@ void Heap::RequestConcurrentGC(Thread* self) {
self->IsHandlingStackOverflow()) {
return;
}
- JNIEnv* env = self->GetJniEnv();
- DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
- DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
- env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
- WellKnownClasses::java_lang_Daemons_requestGC);
- CHECK(!env->ExceptionCheck());
+ NotifyConcurrentGCRequest(self);
}
void Heap::ConcurrentGC(Thread* self) {
@@ -3267,5 +3274,21 @@ void Heap::ClearMarkedObjects() {
}
}
+void Heap::WaitForConcurrentGCRequest(Thread* self) {
+ ScopedThreadStateChange tsc(self, kBlocked);
+ MutexLock mu(self, *gc_request_lock_);
+ while (!gc_request_pending_) {
+ gc_request_cond_->Wait(self);
+ }
+ gc_request_pending_ = false;
+}
+
+void Heap::NotifyConcurrentGCRequest(Thread* self) {
+ ScopedThreadStateChange tsc(self, kBlocked);
+ MutexLock mu(self, *gc_request_lock_);
+ gc_request_pending_ = true;
+ gc_request_cond_->Signal(self);
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 4e1a0ff242..529af9539e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -611,6 +611,9 @@ class Heap {
return zygote_space_ != nullptr;
}
+ void WaitForConcurrentGCRequest(Thread* self) LOCKS_EXCLUDED(gc_request_lock_);
+ void NotifyConcurrentGCRequest(Thread* self) LOCKS_EXCLUDED(gc_request_lock_);
+
private:
// Compact source space to target space.
void Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -874,6 +877,11 @@ class Heap {
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
+ // Guards concurrent GC requests.
+ Mutex* gc_request_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> gc_request_cond_ GUARDED_BY(gc_request_lock_);
+ bool gc_request_pending_ GUARDED_BY(gc_request_lock_);
+
// Reference processor;
ReferenceProcessor reference_processor_;
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 012f9f91f5..99bd63fa8a 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -234,7 +234,7 @@ bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference
MutexLock mu(self, *Locks::reference_processor_lock_);
// Wait until we are done processing references.
while (SlowPathEnabled()) {
- condition_.Wait(self);
+ condition_.WaitHoldingLocks(self);
}
// At this point, since the sentinel of the reference is live, it is guaranteed to not be
// enqueued if we just finished processing references. Otherwise, we may be doing the main GC
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 4003524e5e..f4efe3c823 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -91,15 +91,30 @@ mirror::Reference* ReferenceQueue::DequeuePendingReference() {
void ReferenceQueue::Dump(std::ostream& os) const {
mirror::Reference* cur = list_;
os << "Reference starting at list_=" << list_ << "\n";
- while (cur != nullptr) {
+ if (cur == nullptr) {
+ return;
+ }
+ do {
mirror::Reference* pending_next = cur->GetPendingNext();
- os << "PendingNext=" << pending_next;
+ os << "Reference= " << cur << " PendingNext=" << pending_next;
if (cur->IsFinalizerReferenceInstance()) {
os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
}
os << "\n";
cur = pending_next;
+ } while (cur != list_);
+}
+
+size_t ReferenceQueue::GetLength() const {
+ size_t count = 0;
+ mirror::Reference* cur = list_;
+ if (cur != nullptr) {
+ do {
+ ++count;
+ cur = cur->GetPendingNext();
+ } while (cur != list_);
}
+ return count;
}
void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 4ef8478752..f7d89d0543 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -56,12 +56,14 @@ class ReferenceQueue {
// overhead.
void EnqueueReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Enqueue a reference without checking that it is enqueable.
void EnqueuePendingReference(mirror::Reference* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Dequeue the first reference (returns list_).
mirror::Reference* DequeuePendingReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Enqueues finalizer references with white referents. White referents are blackened, moved to the
- // zombie field, and the referent field is cleared.
+ // Enqueues finalizer references with white referents. White referents are blackened, moved to
+ // the zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
IsHeapReferenceMarkedCallback* is_marked_callback,
MarkObjectCallback* mark_object_callback, void* arg)
@@ -73,24 +75,22 @@ class ReferenceQueue {
void ForwardSoftReferences(IsHeapReferenceMarkedCallback* preserve_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Unlink the reference list clearing references objects with white referents. Cleared references
+ // Unlink the reference list clearing references objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
void ClearWhiteReferences(ReferenceQueue* cleared_references,
IsHeapReferenceMarkedCallback* is_marked_callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t GetLength() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsEmpty() const {
return list_ == nullptr;
}
-
void Clear() {
list_ = nullptr;
}
-
- mirror::Reference* GetList() {
+ mirror::Reference* GetList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return list_;
}
@@ -102,7 +102,6 @@ class ReferenceQueue {
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
// calling AtomicEnqueueIfNotEnqueued.
Mutex* const lock_;
-
// The actual reference list. Only a root for the mark compact GC since it will be null for other
// GC types.
mirror::Reference* list_;
diff --git a/runtime/gc/reference_queue_test.cc b/runtime/gc/reference_queue_test.cc
new file mode 100644
index 0000000000..888c0d27ca
--- /dev/null
+++ b/runtime/gc/reference_queue_test.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_runtime_test.h"
+#include "reference_queue.h"
+#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+namespace gc {
+
+class ReferenceQueueTest : public CommonRuntimeTest {};
+
+TEST_F(ReferenceQueueTest, EnqueueDequeue) {
+ Thread* self = Thread::Current();
+ StackHandleScope<20> hs(self);
+ Mutex lock("Reference queue lock");
+ ReferenceQueue queue(&lock);
+ ASSERT_TRUE(queue.IsEmpty());
+ ScopedObjectAccess soa(self);
+ ASSERT_EQ(queue.GetLength(), 0U);
+ auto ref_class = hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
+ NullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(ref_class.Get() != nullptr);
+ auto ref1(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
+ ASSERT_TRUE(ref1.Get() != nullptr);
+ auto ref2(hs.NewHandle(ref_class->AllocObject(self)->AsReference()));
+ ASSERT_TRUE(ref2.Get() != nullptr);
+ // FIFO ordering.
+ queue.EnqueuePendingReference(ref1.Get());
+ ASSERT_TRUE(!queue.IsEmpty());
+ ASSERT_EQ(queue.GetLength(), 1U);
+ queue.EnqueuePendingReference(ref2.Get());
+ ASSERT_TRUE(!queue.IsEmpty());
+ ASSERT_EQ(queue.GetLength(), 2U);
+ ASSERT_EQ(queue.DequeuePendingReference(), ref2.Get());
+ ASSERT_TRUE(!queue.IsEmpty());
+ ASSERT_EQ(queue.GetLength(), 1U);
+ ASSERT_EQ(queue.DequeuePendingReference(), ref1.Get());
+ ASSERT_EQ(queue.GetLength(), 0U);
+ ASSERT_TRUE(queue.IsEmpty());
+}
+
+TEST_F(ReferenceQueueTest, Dump) {
+ Thread* self = Thread::Current();
+ StackHandleScope<20> hs(self);
+ Mutex lock("Reference queue lock");
+ ReferenceQueue queue(&lock);
+ ScopedObjectAccess soa(self);
+ queue.Dump(LOG(INFO));
+ auto weak_ref_class = hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/WeakReference;",
+ NullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(weak_ref_class.Get() != nullptr);
+ auto finalizer_ref_class = hs.NewHandle(
+ Runtime::Current()->GetClassLinker()->FindClass(self, "Ljava/lang/ref/FinalizerReference;",
+ NullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(finalizer_ref_class.Get() != nullptr);
+ auto ref1(hs.NewHandle(weak_ref_class->AllocObject(self)->AsReference()));
+ ASSERT_TRUE(ref1.Get() != nullptr);
+ auto ref2(hs.NewHandle(finalizer_ref_class->AllocObject(self)->AsReference()));
+ ASSERT_TRUE(ref2.Get() != nullptr);
+ queue.EnqueuePendingReference(ref1.Get());
+ queue.Dump(LOG(INFO));
+ queue.EnqueuePendingReference(ref2.Get());
+ queue.Dump(LOG(INFO));
+}
+
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 43a2c59a49..7905bb4854 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -173,7 +173,8 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
// stored in between objects.
// Remaining size is for the new alloc space.
const size_t growth_limit = growth_limit_ - size;
- const size_t capacity = Capacity() - size;
+ // Use the mem map limit in case of an error when clearing the growth limit.
+ const size_t capacity = NonGrowthLimitCapacity() - size;
VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_) << "\n"
<< "End " << reinterpret_cast<const void*>(End()) << "\n"
<< "Size " << size << "\n"
diff --git a/runtime/globals.h b/runtime/globals.h
index 3104229b17..e531c3a813 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -58,12 +58,6 @@ static constexpr bool kIsTargetBuild = true;
static constexpr bool kIsTargetBuild = false;
#endif
-#if defined(ART_USE_PORTABLE_COMPILER)
-static constexpr bool kUsePortableCompiler = true;
-#else
-static constexpr bool kUsePortableCompiler = false;
-#endif
-
#if defined(ART_USE_OPTIMIZING_COMPILER)
static constexpr bool kUseOptimizingCompiler = true;
#else
@@ -71,7 +65,7 @@ static constexpr bool kUseOptimizingCompiler = false;
#endif
// Garbage collector constants.
-static constexpr bool kMovingCollector = true && !kUsePortableCompiler;
+static constexpr bool kMovingCollector = true;
static constexpr bool kMarkCompactSupport = false && kMovingCollector;
// True if we allow moving field arrays, this can cause complication with mark compact.
static constexpr bool kMoveFieldArrays = !kMarkCompactSupport;
@@ -101,7 +95,11 @@ static constexpr bool kUseBrooksReadBarrier = false;
static constexpr bool kUseBakerOrBrooksReadBarrier = kUseBakerReadBarrier || kUseBrooksReadBarrier;
// If true, references within the heap are poisoned (negated).
+#ifdef ART_HEAP_POISONING
+static constexpr bool kPoisonHeapReferences = true;
+#else
static constexpr bool kPoisonHeapReferences = false;
+#endif
// Kinds of tracing clocks.
enum TraceClockSource {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 639b0f0766..6bc813faf9 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -83,30 +83,15 @@ bool Instrumentation::InstallStubsForClass(mirror::Class* klass) {
return true;
}
-static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code)
+static void UpdateEntrypoints(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- method->SetEntryPointFromPortableCompiledCode(portable_code);
method->SetEntryPointFromQuickCompiledCode(quick_code);
- bool portable_enabled = method->IsPortableCompiled();
- if (have_portable_code && !portable_enabled) {
- method->SetIsPortableCompiled();
- } else if (portable_enabled) {
- method->ClearIsPortableCompiled();
- }
if (!method->IsResolutionMethod()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (class_linker->IsQuickToInterpreterBridge(quick_code) ||
(class_linker->IsQuickResolutionStub(quick_code) &&
Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly() &&
!method->IsNative() && !method->IsProxyMethod())) {
- if (kIsDebugBuild) {
- if (quick_code == GetQuickToInterpreterBridge()) {
- DCHECK(portable_code == GetPortableToInterpreterBridge());
- } else if (class_linker->IsQuickResolutionStub(quick_code)) {
- DCHECK(class_linker->IsPortableResolutionStub(portable_code));
- }
- }
DCHECK(!method->IsNative()) << PrettyMethod(method);
DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
method->SetEntryPointFromInterpreter(art::artInterpreterToInterpreterBridge);
@@ -126,27 +111,21 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;")) {
return;
}
- const void* new_portable_code;
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
bool is_class_initialized = method->GetDeclaringClass()->IsInitialized();
- bool have_portable_code = false;
if (uninstall) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
- new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickToInterpreterBridge();
} else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
new_quick_code = class_linker->GetQuickOatCodeFor(method);
} else {
- new_portable_code = GetPortableResolutionStub();
new_quick_code = GetQuickResolutionStub();
}
} else { // !uninstall
if ((interpreter_stubs_installed_ || forced_interpret_only_ || IsDeoptimized(method)) &&
!method->IsNative()) {
- new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickToInterpreterBridge();
} else {
// Do not overwrite resolution trampoline. When the trampoline initializes the method's
@@ -154,20 +133,17 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) {
// For more details, see ClassLinker::FixupStaticTrampolines.
if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
if (entry_exit_stubs_installed_) {
- new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickInstrumentationEntryPoint();
} else {
- new_portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
new_quick_code = class_linker->GetQuickOatCodeFor(method);
DCHECK(!class_linker->IsQuickToInterpreterBridge(new_quick_code));
}
} else {
- new_portable_code = GetPortableResolutionStub();
new_quick_code = GetQuickResolutionStub();
}
}
}
- UpdateEntrypoints(method, new_quick_code, new_portable_code, have_portable_code);
+ UpdateEntrypoints(method, new_quick_code);
}
// Places the instrumentation exit pc as the return PC for every quick frame. This also allows
@@ -195,7 +171,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
return true; // Ignore upcalls.
}
if (GetCurrentQuickFrame() == NULL) {
- bool interpreter_frame = !m->IsPortableCompiled();
+ bool interpreter_frame = true;
InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
interpreter_frame);
if (kVerboseInstrumentation) {
@@ -654,41 +630,26 @@ void Instrumentation::ResetQuickAllocEntryPoints() {
}
}
-void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code) {
- const void* new_portable_code;
+void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code) {
const void* new_quick_code;
- bool new_have_portable_code;
if (LIKELY(!instrumentation_stubs_installed_)) {
- new_portable_code = portable_code;
new_quick_code = quick_code;
- new_have_portable_code = have_portable_code;
} else {
if ((interpreter_stubs_installed_ || IsDeoptimized(method)) && !method->IsNative()) {
- new_portable_code = GetPortableToInterpreterBridge();
new_quick_code = GetQuickToInterpreterBridge();
- new_have_portable_code = false;
} else {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (class_linker->IsQuickResolutionStub(quick_code) ||
class_linker->IsQuickToInterpreterBridge(quick_code)) {
- DCHECK(class_linker->IsPortableResolutionStub(portable_code) ||
- class_linker->IsPortableToInterpreterBridge(portable_code));
- new_portable_code = portable_code;
new_quick_code = quick_code;
- new_have_portable_code = have_portable_code;
} else if (entry_exit_stubs_installed_) {
new_quick_code = GetQuickInstrumentationEntryPoint();
- new_portable_code = GetPortableToInterpreterBridge();
- new_have_portable_code = false;
} else {
- new_portable_code = portable_code;
new_quick_code = quick_code;
- new_have_portable_code = have_portable_code;
}
}
}
- UpdateEntrypoints(method, new_quick_code, new_portable_code, new_have_portable_code);
+ UpdateEntrypoints(method, new_quick_code);
}
bool Instrumentation::AddDeoptimizedMethod(mirror::ArtMethod* method) {
@@ -761,8 +722,7 @@ void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
<< " is already deoptimized";
}
if (!interpreter_stubs_installed_) {
- UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint(), GetPortableToInterpreterBridge(),
- false);
+ UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint());
// Install instrumentation exit stub and instrumentation frames. We may already have installed
// these previously so it will only cover the newly created frames.
@@ -793,12 +753,10 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (method->IsStatic() && !method->IsConstructor() &&
!method->GetDeclaringClass()->IsInitialized()) {
- UpdateEntrypoints(method, GetQuickResolutionStub(), GetPortableResolutionStub(), false);
+ UpdateEntrypoints(method, GetQuickResolutionStub());
} else {
- bool have_portable_code = false;
const void* quick_code = class_linker->GetQuickOatCodeFor(method);
- const void* portable_code = class_linker->GetPortableOatCodeFor(method, &have_portable_code);
- UpdateEntrypoints(method, quick_code, portable_code, have_portable_code);
+ UpdateEntrypoints(method, quick_code);
}
// If there is no deoptimized method left, we can restore the stack of each thread.
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index effa9f7b2e..2af9a73d8a 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -193,8 +193,7 @@ class Instrumentation {
void ResetQuickAllocEntryPoints() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
// Update the code of a method respecting any installed stubs.
- void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code)
+ void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -217,10 +216,6 @@ class Instrumentation {
return forced_interpret_only_;
}
- bool ShouldPortableCodeDeoptimize() const {
- return instrumentation_stubs_installed_;
- }
-
bool AreExitStubsInstalled() const {
return instrumentation_stubs_installed_;
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 3c7db85395..2a63456c0a 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -145,6 +145,18 @@ bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t in
case Primitive::kPrimInt:
shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset)));
break;
+ case Primitive::kPrimBoolean:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldBoolean(field_offset)));
+ break;
+ case Primitive::kPrimByte:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldByte(field_offset)));
+ break;
+ case Primitive::kPrimChar:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldChar(field_offset)));
+ break;
+ case Primitive::kPrimShort:
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldShort(field_offset)));
+ break;
case Primitive::kPrimLong:
shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset)));
break;
@@ -163,9 +175,13 @@ bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t in
template bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
uint16_t inst_data)
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimInt); // iget-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimLong); // iget-wide-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimInt); // iget-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimBoolean); // iget-boolean-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimByte); // iget-byte-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimChar); // iget-char-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimShort); // iget-short-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimLong); // iget-wide-quick.
+EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot); // iget-object-quick.
#undef EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL
template<Primitive::Type field_type>
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index c332a7b598..c6102633be 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -1249,6 +1249,30 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
}
HANDLE_INSTRUCTION_END();
+ HANDLE_INSTRUCTION_START(IGET_BOOLEAN_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_BYTE_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_CHAR_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
+ HANDLE_INSTRUCTION_START(IGET_SHORT_QUICK) {
+ bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+ }
+ HANDLE_INSTRUCTION_END();
+
HANDLE_INSTRUCTION_START(IGET_WIDE_QUICK) {
bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
@@ -2310,22 +2334,6 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
- HANDLE_INSTRUCTION_START(UNUSED_EF)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F0)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F1)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
- HANDLE_INSTRUCTION_START(UNUSED_F2)
- UnexpectedOpcode(inst, shadow_frame);
- HANDLE_INSTRUCTION_END();
-
HANDLE_INSTRUCTION_START(UNUSED_F3)
UnexpectedOpcode(inst, shadow_frame);
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index f9bbfa17b9..8bbc69481a 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -1128,6 +1128,30 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
}
+ case Instruction::IGET_BOOLEAN_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_BYTE_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_CHAR_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
+ case Instruction::IGET_SHORT_QUICK: {
+ PREAMBLE();
+ bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+ break;
+ }
case Instruction::SGET_BOOLEAN: {
PREAMBLE();
bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst, inst_data);
@@ -2137,7 +2161,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
inst = inst->Next_2xx();
break;
case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
- case Instruction::UNUSED_EF ... Instruction::UNUSED_FF:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
UnexpectedOpcode(inst, shadow_frame);
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index dcdbe9d6cb..7f677ab5d3 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -22,6 +22,7 @@
#include "base/mutex.h"
#include "base/stl_util.h"
#include "check_jni.h"
+#include "dex_file-inl.h"
#include "fault_handler.h"
#include "indirect_reference_table-inl.h"
#include "mirror/art_method.h"
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index be34bd3df2..a39a7b76fb 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1345,8 +1345,10 @@ static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
}
break;
default:
- LOG(WARNING) << "GLITCH: unsupported modKind=" << mod.modKind;
- break;
+ LOG(WARNING) << "Unsupported modifier " << mod.modKind << " for event " << pEvent->eventKind;
+ // Free allocated event to avoid leak before leaving.
+ EventFree(pEvent);
+ return JDWP::ERR_NOT_IMPLEMENTED;
}
}
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 62b6b3407f..045fe2f811 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1624,7 +1624,6 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
TEST_F(JniInternalTest, GetPrimitiveField_SetPrimitiveField) {
- TEST_DISABLED_FOR_PORTABLE();
Thread::Current()->TransitionFromSuspendedToRunnable();
LoadDex("AllFields");
bool started = runtime_->Start();
@@ -1655,7 +1654,6 @@ TEST_F(JniInternalTest, GetPrimitiveField_SetPrimitiveField) {
}
TEST_F(JniInternalTest, GetObjectField_SetObjectField) {
- TEST_DISABLED_FOR_PORTABLE();
Thread::Current()->TransitionFromSuspendedToRunnable();
LoadDex("AllFields");
runtime_->Start();
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index c29276a238..7d31148aab 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -24,6 +24,7 @@
#include "class_linker.h"
#include "dex_cache.h"
#include "dex_file.h"
+#include "dex_file-inl.h"
#include "object-inl.h"
#include "object_array.h"
#include "oat.h"
@@ -72,12 +73,7 @@ inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
}
inline uint32_t ArtMethod::GetDexMethodIndex() {
-#ifdef ART_SEA_IR_MODE
- // TODO: Re-add this check for (PORTABLE + SMALL + ) SEA IR when PORTABLE IS fixed!
- // DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
-#else
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
-#endif
return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_));
}
@@ -187,21 +183,11 @@ inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
}
-inline uint32_t ArtMethod::GetPortableOatCodeOffset() {
- DCHECK(!Runtime::Current()->IsStarted());
- return PointerToLowMemUInt32(GetEntryPointFromPortableCompiledCode());
-}
-
inline void ArtMethod::SetQuickOatCodeOffset(uint32_t code_offset) {
DCHECK(!Runtime::Current()->IsStarted());
SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(code_offset));
}
-inline void ArtMethod::SetPortableOatCodeOffset(uint32_t code_offset) {
- DCHECK(!Runtime::Current()->IsStarted());
- SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
-}
-
inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
const void* code_pointer = GetQuickOatCodePointer(pointer_size);
if (code_pointer == nullptr) {
@@ -380,8 +366,7 @@ inline const char* ArtMethod::GetName() {
}
inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
- mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- return method->GetDexFile()->GetCodeItem(method->GetCodeItemOffset());
+ return GetDeclaringClass()->GetDexFile().GetCodeItem(GetCodeItemOffset());
}
inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 1729686314..ff3822a173 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -39,7 +39,6 @@
namespace art {
namespace mirror {
-extern "C" void art_portable_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*, char);
extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
#if defined(__LP64__) || defined(__arm__)
@@ -200,11 +199,13 @@ uint32_t ArtMethod::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfil
}
uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
- if (IsPortableCompiled()) {
- // Portable doesn't use the machine pc, we just use dex pc instead.
- return static_cast<uint32_t>(pc);
- }
const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+ if (IsOptimized(sizeof(void*))) {
+ uint32_t ret = GetStackMap(sought_offset).GetDexPc();
+ return ret;
+ }
+
MappingTable table(entry_point != nullptr ?
GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
if (table.TotalSize() == 0) {
@@ -213,7 +214,6 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
return DexFile::kDexNoIndex; // Special no mapping case
}
- uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
// Assume the caller wants a pc-to-dex mapping so check here first.
typedef MappingTable::PcToDexIterator It;
for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
@@ -347,19 +347,12 @@ void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
bool ArtMethod::IsEntrypointInterpreter() {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (!IsPortableCompiled()) { // Quick.
- const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
- return oat_quick_code == nullptr ||
- oat_quick_code != GetEntryPointFromQuickCompiledCode();
- } else { // Portable.
- const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
- return oat_portable_code == nullptr ||
- oat_portable_code != GetEntryPointFromPortableCompiledCode();
- }
+ const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
+ return oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode();
}
const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
- if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
+ if (IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -412,34 +405,27 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
} else {
const bool kLogInvocationStartAndReturn = false;
bool have_quick_code = GetEntryPointFromQuickCompiledCode() != nullptr;
- bool have_portable_code = GetEntryPointFromPortableCompiledCode() != nullptr;
- if (LIKELY(have_quick_code || have_portable_code)) {
+ if (LIKELY(have_quick_code)) {
if (kLogInvocationStartAndReturn) {
- LOG(INFO) << StringPrintf("Invoking '%s' %s code=%p", PrettyMethod(this).c_str(),
- have_quick_code ? "quick" : "portable",
- have_quick_code ? GetEntryPointFromQuickCompiledCode()
- : GetEntryPointFromPortableCompiledCode());
+ LOG(INFO) << StringPrintf("Invoking '%s' quick code=%p", PrettyMethod(this).c_str(),
+ GetEntryPointFromQuickCompiledCode());
}
- // Ensure that we won't be accidentally calling quick/portable compiled code when -Xint.
+ // Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
CHECK(IsEntrypointInterpreter())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
- if (!IsPortableCompiled()) {
#if defined(__LP64__) || defined(__arm__)
- if (!IsStatic()) {
- (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
- } else {
- (*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
- }
-#else
+ if (!IsStatic()) {
(*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
-#endif
} else {
- (*art_portable_invoke_stub)(this, args, args_size, self, result, shorty[0]);
+ (*art_quick_invoke_static_stub)(this, args, args_size, self, result, shorty);
}
+#else
+ (*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
+#endif
if (UNLIKELY(self->GetException(nullptr) == Thread::GetDeoptimizationException())) {
// Unusual case where we were running generated code and an
// exception was thrown to force the activations to be removed from the
@@ -451,10 +437,8 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
interpreter::EnterInterpreterFromDeoptimize(self, shadow_frame, result);
}
if (kLogInvocationStartAndReturn) {
- LOG(INFO) << StringPrintf("Returned '%s' %s code=%p", PrettyMethod(this).c_str(),
- have_quick_code ? "quick" : "portable",
- have_quick_code ? GetEntryPointFromQuickCompiledCode()
- : GetEntryPointFromPortableCompiledCode());
+ LOG(INFO) << StringPrintf("Returned '%s' quick code=%p", PrettyMethod(this).c_str(),
+ GetEntryPointFromQuickCompiledCode());
}
} else {
LOG(INFO) << "Not invoking '" << PrettyMethod(this) << "' code=null";
@@ -484,10 +468,6 @@ static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
}
QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
- if (UNLIKELY(IsPortableCompiled())) {
- // Portable compiled dex bytecode or jni stub.
- return QuickMethodFrameInfo(kStackAlignment, 0u, 0u);
- }
Runtime* runtime = Runtime::Current();
if (UNLIKELY(IsAbstract())) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 210794488d..29e061a7db 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -20,6 +20,7 @@
#include "dex_file.h"
#include "gc_root.h"
#include "invoke_type.h"
+#include "method_reference.h"
#include "modifiers.h"
#include "object.h"
#include "object_callbacks.h"
@@ -151,25 +152,12 @@ class MANAGED ArtMethod FINAL : public Object {
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
- return GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+ return !IsNative()
+ && GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
&& GetQuickOatCodePointer(pointer_size) != nullptr
&& GetNativeGcMap(pointer_size) == nullptr;
}
- bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (GetAccessFlags() & kAccPortableCompiled) != 0;
- }
-
- void SetIsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(!IsPortableCompiled());
- SetAccessFlags(GetAccessFlags() | kAccPortableCompiled);
- }
-
- void ClearIsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsPortableCompiled());
- SetAccessFlags(GetAccessFlags() & ~kAccPortableCompiled);
- }
-
bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -282,42 +270,6 @@ class MANAGED ArtMethod FINAL : public Object {
EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
}
- ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) {
- return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
- PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size);
- }
-
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromPortableCompiledCode()
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
- return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*));
- }
-
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<const void*, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size);
- }
-
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckObjectSizeEqualsMirrorSize();
- return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code,
- sizeof(void*));
- }
-
- template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromPortableCompiledCodePtrSize(
- const void* entry_point_from_portable_compiled_code, size_t pointer_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldPtrWithSize<false, true, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(pointer_size),
- entry_point_from_portable_compiled_code, pointer_size);
- }
-
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CheckObjectSizeEqualsMirrorSize();
@@ -374,9 +326,7 @@ class MANAGED ArtMethod FINAL : public Object {
bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t GetPortableOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetPortableOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
@@ -521,6 +471,10 @@ class MANAGED ArtMethod FINAL : public Object {
uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return MethodReference(GetDexFile(), GetDexMethodIndex());
+ }
+
// Find the catch block for the given exception type and dex_pc. When a catch block is found,
// indicates whether the found catch block is responsible for clearing the exception or whether
// a move-exception instruction is present.
@@ -642,12 +596,8 @@ class MANAGED ArtMethod FINAL : public Object {
void* entry_point_from_jni_;
// Method dispatch from quick compiled code invokes this pointer which may cause bridging into
- // portable compiled code or the interpreter.
+ // the interpreter.
void* entry_point_from_quick_compiled_code_;
-
- // Method dispatch from portable compiled code invokes this pointer which may cause bridging
- // into quick compiled code or the interpreter. Last to simplify entrypoint logic.
- void* entry_point_from_portable_compiled_code_;
} ptr_sized_fields_;
static GcRoot<Class> java_lang_reflect_ArtMethod_;
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 1662ebfe82..3dc9e08a17 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -514,7 +514,15 @@ inline uint32_t Class::GetAccessFlags() {
IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>() ||
this == String::GetJavaLangString() ||
this == ArtField::GetJavaLangReflectArtField() ||
- this == ArtMethod::GetJavaLangReflectArtMethod());
+ this == ArtMethod::GetJavaLangReflectArtMethod())
+ << "IsIdxLoaded=" << IsIdxLoaded<kVerifyFlags>()
+ << " IsRetired=" << IsRetired<kVerifyFlags>()
+ << " IsErroneous=" <<
+ IsErroneous<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()
+ << " IsString=" << (this == String::GetJavaLangString())
+ << " IsArtField=" << (this == ArtField::GetJavaLangReflectArtField())
+ << " IsArtMethod=" << (this == ArtMethod::GetJavaLangReflectArtMethod())
+ << " descriptor=" << PrettyDescriptor(this);
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 65d6adedcf..8bccd9e43c 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -24,6 +24,7 @@
#include "class.h"
#include "class-inl.h"
#include "class_linker-inl.h"
+#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "iftable-inl.h"
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 9d789cd0cc..ae5f60acae 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -77,10 +77,6 @@ class ObjectTest : public CommonRuntimeTest {
TEST_F(ObjectTest, Constants) {
EXPECT_EQ(kObjectReferenceSize, sizeof(HeapReference<Object>));
EXPECT_EQ(kObjectHeaderSize, sizeof(Object));
- EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_32,
- ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
- EXPECT_EQ(MIRROR_ART_METHOD_PORTABLE_CODE_OFFSET_64,
- ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_32,
ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
EXPECT_EQ(MIRROR_ART_METHOD_QUICK_CODE_OFFSET_64,
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 23c18f86ff..09dc78ad49 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -46,7 +46,6 @@ static constexpr uint32_t kAccClassIsProxy = 0x00040000; // class (dex
static constexpr uint32_t kAccPreverified = 0x00080000; // class (runtime),
// method (dex only)
static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
-static constexpr uint32_t kAccPortableCompiled = 0x00100000; // method (dex only)
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
// Special runtime-only flags.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index d40d64b437..a348432340 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -221,6 +221,13 @@ static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env));
}
+static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
+ Runtime::Current()->GetHeap()->NotifyConcurrentGCRequest(ThreadForEnv(env));
+}
+static void VMRuntime_waitForConcurrentGCRequest(JNIEnv* env, jobject) {
+ Runtime::Current()->GetHeap()->WaitForConcurrentGCRequest(ThreadForEnv(env));
+}
+
typedef std::map<std::string, mirror::String*> StringTable;
static void PreloadDexCachesStringsCallback(mirror::Object** root, void* arg,
@@ -559,6 +566,8 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, classPath, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, clearGrowthLimit, "()V"),
NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
+ NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
+ NATIVE_METHOD(VMRuntime, waitForConcurrentGCRequest, "()V"),
NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
NATIVE_METHOD(VMRuntime, isDebuggerActive, "!()Z"),
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 1775468688..b7f31f2005 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -20,6 +20,7 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "dex_file-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/oat.cc b/runtime/oat.cc
index eab34f7e17..c223e2e0d8 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -20,12 +20,13 @@
#include <zlib.h>
#include "arch/instruction_set_features.h"
+#include "base/stringprintf.h"
#include "utils.h"
namespace art {
-const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '5', '1', '\0' };
+constexpr uint8_t OatHeader::kOatMagic[4];
+constexpr uint8_t OatHeader::kOatVersion[4];
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -67,6 +68,13 @@ OatHeader::OatHeader(InstructionSet instruction_set,
uint32_t image_file_location_oat_checksum,
uint32_t image_file_location_oat_data_begin,
const SafeMap<std::string, std::string>* variable_data) {
+ // Don't want asserts in header as they would be checked in each file that includes it. But the
+ // fields are private, so we check inside a method.
+ static_assert(sizeof(magic_) == sizeof(kOatMagic),
+ "Oat magic and magic_ have different lengths.");
+ static_assert(sizeof(version_) == sizeof(kOatVersion),
+ "Oat version and version_ have different lengths.");
+
memcpy(magic_, kOatMagic, sizeof(kOatMagic));
memcpy(version_, kOatVersion, sizeof(kOatVersion));
executable_offset_ = 0;
@@ -105,9 +113,6 @@ OatHeader::OatHeader(InstructionSet instruction_set,
interpreter_to_interpreter_bridge_offset_ = 0;
interpreter_to_compiled_code_bridge_offset_ = 0;
jni_dlsym_lookup_offset_ = 0;
- portable_imt_conflict_trampoline_offset_ = 0;
- portable_resolution_trampoline_offset_ = 0;
- portable_to_interpreter_bridge_offset_ = 0;
quick_generic_jni_trampoline_offset_ = 0;
quick_imt_conflict_trampoline_offset_ = 0;
quick_resolution_trampoline_offset_ = 0;
@@ -130,6 +135,28 @@ bool OatHeader::IsValid() const {
return true;
}
+std::string OatHeader::GetValidationErrorMessage() const {
+ if (memcmp(magic_, kOatMagic, sizeof(kOatMagic)) != 0) {
+ static_assert(sizeof(kOatMagic) == 4, "kOatMagic has unexpected length");
+ return StringPrintf("Invalid oat magic, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+ kOatMagic[0], kOatMagic[1], kOatMagic[2], kOatMagic[3],
+ magic_[0], magic_[1], magic_[2], magic_[3]);
+ }
+ if (memcmp(version_, kOatVersion, sizeof(kOatVersion)) != 0) {
+ static_assert(sizeof(kOatVersion) == 4, "kOatVersion has unexpected length");
+ return StringPrintf("Invalid oat version, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+ kOatVersion[0], kOatVersion[1], kOatVersion[2], kOatVersion[3],
+ version_[0], version_[1], version_[2], version_[3]);
+ }
+ if (!IsAligned<kPageSize>(executable_offset_)) {
+ return "Executable offset not page-aligned.";
+ }
+ if (!IsAligned<kPageSize>(image_patch_delta_)) {
+ return "Image patch delta not page-aligned.";
+ }
+ return "";
+}
+
const char* OatHeader::GetMagic() const {
CHECK(IsValid());
return reinterpret_cast<const char*>(magic_);
@@ -231,75 +258,18 @@ void OatHeader::SetJniDlsymLookupOffset(uint32_t offset) {
UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(offset));
}
-const void* OatHeader::GetPortableImtConflictTrampoline() const {
- return reinterpret_cast<const uint8_t*>(this) + GetPortableImtConflictTrampolineOffset();
-}
-
-uint32_t OatHeader::GetPortableImtConflictTrampolineOffset() const {
- DCHECK(IsValid());
- CHECK_GE(portable_imt_conflict_trampoline_offset_, jni_dlsym_lookup_offset_);
- return portable_imt_conflict_trampoline_offset_;
-}
-
-void OatHeader::SetPortableImtConflictTrampolineOffset(uint32_t offset) {
- CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
- DCHECK(IsValid());
- DCHECK_EQ(portable_imt_conflict_trampoline_offset_, 0U) << offset;
-
- portable_imt_conflict_trampoline_offset_ = offset;
- UpdateChecksum(&portable_imt_conflict_trampoline_offset_, sizeof(offset));
-}
-
-const void* OatHeader::GetPortableResolutionTrampoline() const {
- return reinterpret_cast<const uint8_t*>(this) + GetPortableResolutionTrampolineOffset();
-}
-
-uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const {
- DCHECK(IsValid());
- CHECK_GE(portable_resolution_trampoline_offset_, portable_imt_conflict_trampoline_offset_);
- return portable_resolution_trampoline_offset_;
-}
-
-void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) {
- CHECK(offset == 0 || offset >= portable_imt_conflict_trampoline_offset_);
- DCHECK(IsValid());
- DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset;
-
- portable_resolution_trampoline_offset_ = offset;
- UpdateChecksum(&portable_resolution_trampoline_offset_, sizeof(offset));
-}
-
-const void* OatHeader::GetPortableToInterpreterBridge() const {
- return reinterpret_cast<const uint8_t*>(this) + GetPortableToInterpreterBridgeOffset();
-}
-
-uint32_t OatHeader::GetPortableToInterpreterBridgeOffset() const {
- DCHECK(IsValid());
- CHECK_GE(portable_to_interpreter_bridge_offset_, portable_resolution_trampoline_offset_);
- return portable_to_interpreter_bridge_offset_;
-}
-
-void OatHeader::SetPortableToInterpreterBridgeOffset(uint32_t offset) {
- CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
- DCHECK(IsValid());
- DCHECK_EQ(portable_to_interpreter_bridge_offset_, 0U) << offset;
-
- portable_to_interpreter_bridge_offset_ = offset;
- UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset));
-}
-
const void* OatHeader::GetQuickGenericJniTrampoline() const {
return reinterpret_cast<const uint8_t*>(this) + GetQuickGenericJniTrampolineOffset();
}
uint32_t OatHeader::GetQuickGenericJniTrampolineOffset() const {
DCHECK(IsValid());
- CHECK_GE(quick_generic_jni_trampoline_offset_, portable_to_interpreter_bridge_offset_);
+ CHECK_GE(quick_generic_jni_trampoline_offset_, jni_dlsym_lookup_offset_);
return quick_generic_jni_trampoline_offset_;
}
void OatHeader::SetQuickGenericJniTrampolineOffset(uint32_t offset) {
- CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
+ CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
DCHECK(IsValid());
DCHECK_EQ(quick_generic_jni_trampoline_offset_, 0U) << offset;
diff --git a/runtime/oat.h b/runtime/oat.h
index 11ed4fb126..f218482050 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
- static const uint8_t kOatMagic[4];
- static const uint8_t kOatVersion[4];
+ static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
+ static constexpr uint8_t kOatVersion[] = { '0', '4', '5', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
@@ -47,6 +47,7 @@ class PACKED(4) OatHeader {
const SafeMap<std::string, std::string>* variable_data);
bool IsValid() const;
+ std::string GetValidationErrorMessage() const;
const char* GetMagic() const;
uint32_t GetChecksum() const;
void UpdateChecksum(const void* data, size_t length);
@@ -68,16 +69,6 @@ class PACKED(4) OatHeader {
uint32_t GetJniDlsymLookupOffset() const;
void SetJniDlsymLookupOffset(uint32_t offset);
- const void* GetPortableResolutionTrampoline() const;
- uint32_t GetPortableResolutionTrampolineOffset() const;
- void SetPortableResolutionTrampolineOffset(uint32_t offset);
- const void* GetPortableImtConflictTrampoline() const;
- uint32_t GetPortableImtConflictTrampolineOffset() const;
- void SetPortableImtConflictTrampolineOffset(uint32_t offset);
- const void* GetPortableToInterpreterBridge() const;
- uint32_t GetPortableToInterpreterBridgeOffset() const;
- void SetPortableToInterpreterBridgeOffset(uint32_t offset);
-
const void* GetQuickGenericJniTrampoline() const;
uint32_t GetQuickGenericJniTrampolineOffset() const;
void SetQuickGenericJniTrampolineOffset(uint32_t offset);
@@ -129,9 +120,6 @@ class PACKED(4) OatHeader {
uint32_t interpreter_to_interpreter_bridge_offset_;
uint32_t interpreter_to_compiled_code_bridge_offset_;
uint32_t jni_dlsym_lookup_offset_;
- uint32_t portable_imt_conflict_trampoline_offset_;
- uint32_t portable_resolution_trampoline_offset_;
- uint32_t portable_to_interpreter_bridge_offset_;
uint32_t quick_generic_jni_trampoline_offset_;
uint32_t quick_imt_conflict_trampoline_offset_;
uint32_t quick_resolution_trampoline_offset_;
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 91e571b7c3..1c6cc8bf8c 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -75,32 +75,26 @@ OatFile* OatFile::Open(const std::string& filename,
CHECK(!filename.empty()) << location;
CheckLocation(location);
std::unique_ptr<OatFile> ret;
- if (kUsePortableCompiler && executable) {
- // If we are using PORTABLE, use dlopen to deal with relocations.
- //
- // We use our own ELF loader for Quick to deal with legacy apps that
- // open a generated dex file by name, remove the file, then open
- // another generated dex file with the same name. http://b/10614658
- ret.reset(OpenDlopen(filename, location, requested_base, error_msg));
- } else {
- // If we aren't trying to execute, we just use our own ElfFile loader for a couple reasons:
- //
- // On target, dlopen may fail when compiling due to selinux restrictions on installd.
- //
- // On host, dlopen is expected to fail when cross compiling, so fall back to OpenElfFile.
- // This won't work for portable runtime execution because it doesn't process relocations.
- std::unique_ptr<File> file(OS::OpenFileForReading(filename.c_str()));
- if (file.get() == NULL) {
- *error_msg = StringPrintf("Failed to open oat filename for reading: %s", strerror(errno));
- return nullptr;
- }
- ret.reset(OpenElfFile(file.get(), location, requested_base, oat_file_begin, false, executable,
- error_msg));
-
- // It would be nice to unlink here. But we might have opened the file created by the
- // ScopedLock, which we better not delete to avoid races. TODO: Investigate how to fix the API
- // to allow removal when we know the ELF must be borked.
+ // If we aren't trying to execute, we just use our own ElfFile loader for a couple reasons:
+ //
+ // On target, dlopen may fail when compiling due to selinux restrictions on installd.
+ //
+ // We use our own ELF loader for Quick to deal with legacy apps that
+ // open a generated dex file by name, remove the file, then open
+ // another generated dex file with the same name. http://b/10614658
+ //
+ // On host, dlopen is expected to fail when cross compiling, so fall back to OpenElfFile.
+ std::unique_ptr<File> file(OS::OpenFileForReading(filename.c_str()));
+ if (file.get() == NULL) {
+ *error_msg = StringPrintf("Failed to open oat filename for reading: %s", strerror(errno));
+ return nullptr;
}
+ ret.reset(OpenElfFile(file.get(), location, requested_base, oat_file_begin, false, executable,
+ error_msg));
+
+ // It would be nice to unlink here. But we might have opened the file created by the
+ // ScopedLock, which we better not delete to avoid races. TODO: Investigate how to fix the API
+ // to allow removal when we know the ELF must be borked.
return ret.release();
}
@@ -233,7 +227,9 @@ bool OatFile::ElfFileOpen(File* file, uint8_t* requested_base, uint8_t* oat_file
bool OatFile::Setup(std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
- *error_msg = StringPrintf("Invalid oat magic for '%s'", GetLocation().c_str());
+ std::string cause = GetOatHeader().GetValidationErrorMessage();
+ *error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
+ cause.c_str());
return false;
}
const uint8_t* oat = Begin();
@@ -591,7 +587,6 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
void OatFile::OatMethod::LinkMethod(mirror::ArtMethod* method) const {
CHECK(method != NULL);
- method->SetEntryPointFromPortableCompiledCode(GetPortableCode());
method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index a335c945db..831ba1ec77 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -97,29 +97,8 @@ class OatFile {
return code_offset_;
}
- const void* GetPortableCode() const {
- // TODO: encode whether code is portable/quick in flags within OatMethod.
- if (kUsePortableCompiler) {
- return GetOatPointer<const void*>(code_offset_);
- } else {
- return nullptr;
- }
- }
-
const void* GetQuickCode() const {
- if (kUsePortableCompiler) {
- return nullptr;
- } else {
- return GetOatPointer<const void*>(code_offset_);
- }
- }
-
- // Returns 0.
- uint32_t GetPortableCodeSize() const {
- // TODO: With Quick, we store the size before the code. With Portable, the code is in a .o
- // file we don't manage ourselves. ELF symbols do have a concept of size, so we could capture
- // that and store it somewhere, such as the OatMethod.
- return 0;
+ return GetOatPointer<const void*>(code_offset_);
}
// Returns size of quick code.
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3e6c86b7c9..1b992d5159 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -691,11 +691,16 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
return false;
}
}
- // If not set, background collector type defaults to homogeneous compaction
- // if not low memory mode, semispace otherwise.
+ // If not set, background collector type defaults to homogeneous compaction.
+ // If foreground is GSS, use GSS as background collector.
+ // If not low memory mode, semispace otherwise.
if (background_collector_type_ == gc::kCollectorTypeNone) {
- background_collector_type_ = low_memory_mode_ ?
- gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ if (collector_type_ != gc::kCollectorTypeGSS) {
+ background_collector_type_ = low_memory_mode_ ?
+ gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
+ } else {
+ background_collector_type_ = collector_type_;
+ }
}
// If a reference to the dalvik core.jar snuck in, replace it with
@@ -722,9 +727,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
if (heap_growth_limit_ == 0) {
heap_growth_limit_ = heap_maximum_size_;
}
- if (background_collector_type_ == gc::kCollectorTypeNone) {
- background_collector_type_ = collector_type_;
- }
return true;
} // NOLINT(readability/fn_size)
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index e399195008..b3da1346ff 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -44,9 +44,7 @@
#include "cutils/properties.h"
#endif
-#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
-#endif
namespace art {
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 3415e8f9ba..d65b2d5241 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -15,6 +15,7 @@
*/
#include "inline_method_analyser.h"
+#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "mirror/art_field.h"
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index cf1ecbf29a..31622de984 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -32,7 +32,7 @@ class Thread;
class ThrowLocation;
class ShadowFrame;
-// Manages exception delivery for Quick backend. Not used by Portable backend.
+// Manages exception delivery for Quick backend.
class QuickExceptionHandler {
public:
QuickExceptionHandler(Thread* self, bool is_deoptimization)
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 01c5070869..c917d844ae 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -72,39 +72,29 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::
}
struct ObjectComparator {
- bool operator()(GcRoot<mirror::Object> root1, GcRoot<mirror::Object> root2)
+ bool operator()(GcRoot<mirror::Object> root1, GcRoot<mirror::Object> root2) const
// TODO: enable analysis when analysis can work with the STL.
NO_THREAD_SAFETY_ANALYSIS {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
mirror::Object* obj1 = root1.Read<kWithoutReadBarrier>();
mirror::Object* obj2 = root2.Read<kWithoutReadBarrier>();
- // Ensure null references and cleared jweaks appear at the end.
- if (obj1 == NULL) {
- return true;
- } else if (obj2 == NULL) {
- return false;
- }
+ DCHECK(obj1 != nullptr);
+ DCHECK(obj2 != nullptr);
Runtime* runtime = Runtime::Current();
- if (runtime->IsClearedJniWeakGlobal(obj1)) {
- return true;
- } else if (runtime->IsClearedJniWeakGlobal(obj2)) {
- return false;
- }
-
+ DCHECK(!runtime->IsClearedJniWeakGlobal(obj1));
+ DCHECK(!runtime->IsClearedJniWeakGlobal(obj2));
// Sort by class...
if (obj1->GetClass() != obj2->GetClass()) {
- return obj1->GetClass()->IdentityHashCode() < obj2->IdentityHashCode();
- } else {
- // ...then by size...
- size_t count1 = obj1->SizeOf();
- size_t count2 = obj2->SizeOf();
- if (count1 != count2) {
- return count1 < count2;
- } else {
- // ...and finally by identity hash code.
- return obj1->IdentityHashCode() < obj2->IdentityHashCode();
- }
+ return obj1->GetClass()->IdentityHashCode() < obj2->GetClass()->IdentityHashCode();
+ }
+ // ...then by size...
+ const size_t size1 = obj1->SizeOf();
+ const size_t size2 = obj2->SizeOf();
+ if (size1 != size2) {
+ return size1 < size2;
}
+ // ...and finally by identity hash code.
+ return obj1->IdentityHashCode() < obj2->IdentityHashCode();
}
};
@@ -166,16 +156,17 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
first = 0;
}
os << " Last " << (count - first) << " entries (of " << count << "):\n";
+ Runtime* runtime = Runtime::Current();
for (int idx = count - 1; idx >= first; --idx) {
mirror::Object* ref = entries[idx].Read();
- if (ref == NULL) {
+ if (ref == nullptr) {
continue;
}
- if (Runtime::Current()->IsClearedJniWeakGlobal(ref)) {
+ if (runtime->IsClearedJniWeakGlobal(ref)) {
os << StringPrintf(" %5d: cleared jweak\n", idx);
continue;
}
- if (ref->GetClass() == NULL) {
+ if (ref->GetClass() == nullptr) {
// should only be possible right after a plain dvmMalloc().
size_t size = ref->SizeOf();
os << StringPrintf(" %5d: %p (raw) (%zd bytes)\n", idx, ref, size);
@@ -189,7 +180,7 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
if (element_count != 0) {
StringAppendF(&extras, " (%zd elements)", element_count);
} else if (ref->GetClass()->IsStringClass()) {
- mirror::String* s = const_cast<mirror::Object*>(ref)->AsString();
+ mirror::String* s = ref->AsString();
std::string utf8(s->ToModifiedUtf8());
if (s->GetLength() <= 16) {
StringAppendF(&extras, " \"%s\"", utf8.c_str());
@@ -200,51 +191,45 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
os << StringPrintf(" %5d: ", idx) << ref << " " << className << extras << "\n";
}
- // Make a copy of the table and sort it.
+ // Make a copy of the table and sort it, only adding non null and not cleared elements.
Table sorted_entries;
- for (size_t i = 0; i < entries.size(); ++i) {
- mirror::Object* entry = entries[i].Read();
- sorted_entries.push_back(GcRoot<mirror::Object>(entry));
- }
- std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
-
- // Remove any uninteresting stuff from the list. The sort moved them all to the end.
- while (!sorted_entries.empty() && sorted_entries.back().IsNull()) {
- sorted_entries.pop_back();
- }
- while (!sorted_entries.empty() &&
- Runtime::Current()->IsClearedJniWeakGlobal(
- sorted_entries.back().Read<kWithoutReadBarrier>())) {
- sorted_entries.pop_back();
+ for (GcRoot<mirror::Object>& root : entries) {
+ if (!root.IsNull() && !runtime->IsClearedJniWeakGlobal(root.Read())) {
+ sorted_entries.push_back(root);
+ }
}
if (sorted_entries.empty()) {
return;
}
+ std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
// Dump a summary of the whole table.
os << " Summary:\n";
size_t equiv = 0;
size_t identical = 0;
- for (size_t idx = 1; idx < count; idx++) {
- mirror::Object* prev = sorted_entries[idx-1].Read<kWithoutReadBarrier>();
- mirror::Object* current = sorted_entries[idx].Read<kWithoutReadBarrier>();
- size_t element_count = GetElementCount(prev);
- if (current == prev) {
- // Same reference, added more than once.
- identical++;
- } else if (current->GetClass() == prev->GetClass() && GetElementCount(current) == element_count) {
- // Same class / element count, different object.
- equiv++;
- } else {
- // Different class.
- DumpSummaryLine(os, prev, element_count, identical, equiv);
- equiv = identical = 0;
+ mirror::Object* prev = nullptr;
+ for (GcRoot<mirror::Object>& root : sorted_entries) {
+ mirror::Object* current = root.Read<kWithoutReadBarrier>();
+ if (prev != nullptr) {
+ const size_t element_count = GetElementCount(prev);
+ if (current == prev) {
+ // Same reference, added more than once.
+ ++identical;
+ } else if (current->GetClass() == prev->GetClass() &&
+ GetElementCount(current) == element_count) {
+ // Same class / element count, different object.
+ ++equiv;
+ } else {
+ // Different class.
+ DumpSummaryLine(os, prev, element_count, identical, equiv);
+ equiv = 0;
+ identical = 0;
+ }
}
+ prev = current;
}
// Handle the last entry.
- DumpSummaryLine(os, sorted_entries.back().Read<kWithoutReadBarrier>(),
- GetElementCount(sorted_entries.back().Read<kWithoutReadBarrier>()),
- identical, equiv);
+ DumpSummaryLine(os, prev, GetElementCount(prev), identical, equiv);
}
void ReferenceTable::VisitRoots(RootCallback* visitor, void* arg, uint32_t tid,
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 85f9938934..2aeb92d49a 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -77,12 +77,6 @@ class ArgArray {
}
void AppendWide(uint64_t value) {
- // For ARM and MIPS portable, align wide values to 8 bytes (ArgArray starts at offset of 4).
-#if defined(ART_USE_PORTABLE_COMPILER) && (defined(__arm__) || defined(__mips__))
- if (num_bytes_ % 8 == 0) {
- num_bytes_ += 4;
- }
-#endif
arg_array_[num_bytes_ / 4] = value;
arg_array_[(num_bytes_ / 4) + 1] = value >> 32;
num_bytes_ += 8;
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index eca1800c16..7aefdaab5a 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -493,7 +493,6 @@ class ReflectionTest : public CommonCompilerTest {
};
TEST_F(ReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_PORTABLE();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
@@ -518,122 +517,98 @@ TEST_F(ReflectionTest, StaticMainMethod) {
}
TEST_F(ReflectionTest, StaticNopMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeNopMethod(true);
}
TEST_F(ReflectionTest, NonStaticNopMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeNopMethod(false);
}
TEST_F(ReflectionTest, StaticIdentityByteMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityByteMethod(true);
}
TEST_F(ReflectionTest, NonStaticIdentityByteMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityByteMethod(false);
}
TEST_F(ReflectionTest, StaticIdentityIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityIntMethod(true);
}
TEST_F(ReflectionTest, NonStaticIdentityIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityIntMethod(false);
}
TEST_F(ReflectionTest, StaticIdentityDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityDoubleMethod(true);
}
TEST_F(ReflectionTest, NonStaticIdentityDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeIdentityDoubleMethod(false);
}
TEST_F(ReflectionTest, StaticSumIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntMethod(false);
}
TEST_F(ReflectionTest, StaticSumIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntMethod(false);
}
TEST_F(ReflectionTest, StaticSumIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntIntMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntIntMethod(false);
}
TEST_F(ReflectionTest, StaticSumIntIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntIntIntMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumIntIntIntIntIntMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumIntIntIntIntIntMethod(false);
}
TEST_F(ReflectionTest, StaticSumDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleMethod(false);
}
TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleMethod(false);
}
TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleDoubleMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleDoubleMethod(false);
}
TEST_F(ReflectionTest, StaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(true);
}
TEST_F(ReflectionTest, NonStaticSumDoubleDoubleDoubleDoubleDoubleMethod) {
- TEST_DISABLED_FOR_PORTABLE();
InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(false);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e79203198e..e91f7c0c90 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -126,8 +126,6 @@ namespace art {
static constexpr bool kEnableJavaStackTraceHandler = false;
Runtime* Runtime::instance_ = nullptr;
-volatile unsigned int gAborting = 0;
-
Runtime::Runtime()
: instruction_set_(kNone),
compiler_callbacks_(nullptr),
@@ -192,6 +190,13 @@ Runtime::~Runtime() {
}
Thread* self = Thread::Current();
+ if (self == nullptr) {
+ CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
+ self = Thread::Current();
+ } else {
+ LOG(WARNING) << "Current thread not detached in Runtime shutdown";
+ }
+
{
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
shutting_down_started_ = true;
@@ -200,6 +205,16 @@ Runtime::~Runtime() {
}
shutting_down_ = true;
}
+ // Shutdown and wait for the daemons.
+ CHECK(self != nullptr);
+ if (IsFinishedStarting()) {
+ self->ClearException();
+ self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
+ WellKnownClasses::java_lang_Daemons_stop);
+ }
+ DetachCurrentThread();
+ self = nullptr;
+
// Shut down background profiler before the runtime exits.
if (profiler_started_) {
BackgroundMethodSamplingProfiler::Shutdown();
@@ -238,8 +253,13 @@ Runtime::~Runtime() {
struct AbortState {
void Dump(std::ostream& os) const {
+ if (gAborting > 1) {
+ os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
+ return;
+ }
+ gAborting++;
os << "Runtime aborting...\n";
- if (Runtime::Current() == nullptr) {
+ if (Runtime::Current() == NULL) {
os << "(Runtime does not yet exist!)\n";
return;
}
@@ -297,18 +317,13 @@ struct AbortState {
void Runtime::Abort() {
gAborting++; // set before taking any locks
- if (gAborting > 1) {
- LogMessage::LogLine(__FILE__, __LINE__, INTERNAL_FATAL,
- "Runtime aborting --- recursively, so no thread-specific detail!\n");
- return;
- }
// Ensure that we don't have multiple threads trying to abort at once,
// which would result in significantly worse diagnostics.
MutexLock mu(Thread::Current(), *Locks::abort_lock_);
// Get any pending output out of the way.
- fflush(nullptr);
+ fflush(NULL);
// Many people have difficulty distinguish aborts from crashes,
// so be explicit.
@@ -316,7 +331,7 @@ void Runtime::Abort() {
LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
// Call the abort hook if we have one.
- if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
+ if (Runtime::Current() != NULL && Runtime::Current()->abort_ != NULL) {
LOG(INTERNAL_FATAL) << "Calling abort hook...";
Runtime::Current()->abort_();
// notreached
@@ -344,7 +359,7 @@ void Runtime::PreZygoteFork() {
}
void Runtime::CallExitHook(jint status) {
- if (exit_ != nullptr) {
+ if (exit_ != NULL) {
ScopedThreadStateChange tsc(Thread::Current(), kNative);
exit_(status);
LOG(WARNING) << "Exit hook returned instead of exiting!";
@@ -359,14 +374,14 @@ void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
// TODO: acquire a static mutex on Runtime to avoid racing.
- if (Runtime::instance_ != nullptr) {
+ if (Runtime::instance_ != NULL) {
return false;
}
- InitLogging(nullptr); // Calls Locks::Init() as a side effect.
+ InitLogging(NULL); // Calls Locks::Init() as a side effect.
instance_ = new Runtime;
if (!instance_->Init(options, ignore_unrecognized)) {
delete instance_;
- instance_ = nullptr;
+ instance_ = NULL;
return false;
}
return true;
@@ -374,7 +389,7 @@ bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
static jobject CreateSystemClassLoader() {
if (Runtime::Current()->UseCompileTimeClassPath()) {
- return nullptr;
+ return NULL;
}
ScopedObjectAccess soa(Thread::Current());
@@ -387,7 +402,7 @@ static jobject CreateSystemClassLoader() {
mirror::ArtMethod* getSystemClassLoader =
class_loader_class->FindDirectMethod("getSystemClassLoader", "()Ljava/lang/ClassLoader;");
- CHECK(getSystemClassLoader != nullptr);
+ CHECK(getSystemClassLoader != NULL);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
@@ -403,7 +418,7 @@ static jobject CreateSystemClassLoader() {
mirror::ArtField* contextClassLoader =
thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
- CHECK(contextClassLoader != nullptr);
+ CHECK(contextClassLoader != NULL);
// We can't run in a transaction yet.
contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
@@ -529,7 +544,7 @@ bool Runtime::InitZygote() {
// Mark rootfs as being a slave so that changes from default
// namespace only flow into our children.
- if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
+ if (mount("rootfs", "/", NULL, (MS_SLAVE | MS_REC), NULL) == -1) {
PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
return false;
}
@@ -538,7 +553,7 @@ bool Runtime::InitZygote() {
// bind mount storage into their respective private namespaces, which
// are isolated from each other.
const char* target_base = getenv("EMULATED_STORAGE_TARGET");
- if (target_base != nullptr) {
+ if (target_base != NULL) {
if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
"uid=0,gid=1028,mode=0751") == -1) {
LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
@@ -895,14 +910,14 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
"no stack trace available");
- pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(nullptr));
+ pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
// Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
// ahead of checking the application's class loader.
self->ThrowNewException(ThrowLocation(), "Ljava/lang/NoClassDefFoundError;",
"Class not found using the boot class loader; no stack trace available");
- pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException(nullptr));
+ pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
// Look for a native bridge.
@@ -958,10 +973,9 @@ void Runtime::InitNativeMethods() {
// Most JNI libraries can just use System.loadLibrary, but libcore can't because it's
// the library that implements System.loadLibrary!
{
- std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
- if (!java_vm_->LoadNativeLibrary(env, mapped_name, nullptr, &reason)) {
- LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
+ if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &reason)) {
+ LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << reason;
}
}
@@ -978,26 +992,26 @@ void Runtime::InitThreadGroups(Thread* self) {
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
- CHECK(main_thread_group_ != nullptr || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsCompiler());
system_thread_group_ =
env->NewGlobalRef(env->GetStaticObjectField(
WellKnownClasses::java_lang_ThreadGroup,
WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
- CHECK(system_thread_group_ != nullptr || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsCompiler());
}
jobject Runtime::GetMainThreadGroup() const {
- CHECK(main_thread_group_ != nullptr || IsCompiler());
+ CHECK(main_thread_group_ != NULL || IsCompiler());
return main_thread_group_;
}
jobject Runtime::GetSystemThreadGroup() const {
- CHECK(system_thread_group_ != nullptr || IsCompiler());
+ CHECK(system_thread_group_ != NULL || IsCompiler());
return system_thread_group_;
}
jobject Runtime::GetSystemClassLoader() const {
- CHECK(system_class_loader_ != nullptr || IsCompiler());
+ CHECK(system_class_loader_ != NULL || IsCompiler());
return system_class_loader_;
}
@@ -1123,12 +1137,12 @@ void Runtime::BlockSignals() {
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
}
void Runtime::DetachCurrentThread() {
Thread* self = Thread::Current();
- if (self == nullptr) {
+ if (self == NULL) {
LOG(FATAL) << "attempting to detach thread that is not attached";
}
if (self->HasManagedStack()) {
@@ -1259,10 +1273,8 @@ mirror::ArtMethod* Runtime::CreateImtConflictMethod() {
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
if (runtime->IsCompiler()) {
- method->SetEntryPointFromPortableCompiledCode(nullptr);
method->SetEntryPointFromQuickCompiledCode(nullptr);
} else {
- method->SetEntryPointFromPortableCompiledCode(GetPortableImtConflictStub());
method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
}
return method.Get();
@@ -1279,10 +1291,8 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
method->SetDexMethodIndex(DexFile::kDexNoIndex);
// When compiling, the code pointer will get set later when the image is loaded.
if (runtime->IsCompiler()) {
- method->SetEntryPointFromPortableCompiledCode(nullptr);
method->SetEntryPointFromQuickCompiledCode(nullptr);
} else {
- method->SetEntryPointFromPortableCompiledCode(GetPortableResolutionStub());
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
}
return method.Get();
@@ -1297,7 +1307,6 @@ mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
method->SetDeclaringClass(mirror::ArtMethod::GetJavaLangReflectArtMethod());
// TODO: use a special method for callee saves
method->SetDexMethodIndex(DexFile::kDexNoIndex);
- method->SetEntryPointFromPortableCompiledCode(nullptr);
method->SetEntryPointFromQuickCompiledCode(nullptr);
DCHECK_NE(instruction_set_, kNone);
return method.Get();
@@ -1353,7 +1362,7 @@ void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type
}
const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(jobject class_loader) {
- if (class_loader == nullptr) {
+ if (class_loader == NULL) {
return GetClassLinker()->GetBootClassPath();
}
CHECK(UseCompileTimeClassPath());
@@ -1371,12 +1380,18 @@ void Runtime::SetCompileTimeClassPath(jobject class_loader,
void Runtime::AddMethodVerifier(verifier::MethodVerifier* verifier) {
DCHECK(verifier != nullptr);
+ if (gAborting) {
+ return;
+ }
MutexLock mu(Thread::Current(), method_verifier_lock_);
method_verifiers_.insert(verifier);
}
void Runtime::RemoveMethodVerifier(verifier::MethodVerifier* verifier) {
DCHECK(verifier != nullptr);
+ if (gAborting) {
+ return;
+ }
MutexLock mu(Thread::Current(), method_verifier_lock_);
auto it = method_verifiers_.find(verifier);
CHECK(it != method_verifiers_.end());
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e334764daa..39fd910893 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -71,11 +71,6 @@ class ThreadList;
class Trace;
class Transaction;
-// 0 if not abort, non-zero if an abort is in progress. Used on fatal exit to prevents recursive
-// aborts. Global declaration allows us to disable some error checking to ensure fatal shutdown
-// makes forward progress.
-extern volatile unsigned int gAborting;
-
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
@@ -180,9 +175,9 @@ class Runtime {
return instance_;
}
- // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most callers should
- // prefer. Not [[noreturn]] due to returning early in the case of recursive aborts.
- static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
+ // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
+ // callers should prefer.
+ [[noreturn]] static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 33641edb50..33600ddba5 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -38,6 +38,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
_exit(1);
}
handling_unexpected_signal = true;
+ gAborting++; // set before taking any locks
MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
Runtime* runtime = Runtime::Current();
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index 927309177a..1de035c0d5 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -284,6 +284,7 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
}
handlingUnexpectedSignal = true;
+ gAborting++; // set before taking any locks
MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 43714b95e8..aaa5b898b3 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -113,6 +113,9 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
}
}
+extern "C" mirror::Object* artQuickGetProxyThisObject(StackReference<mirror::ArtMethod>* sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
mirror::Object* StackVisitor::GetThisObject() const {
mirror::ArtMethod* m = GetMethod();
if (m->IsStatic()) {
@@ -125,6 +128,12 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
+ } else if (m->IsProxyMethod()) {
+ if (cur_quick_frame_ != nullptr) {
+ return artQuickGetProxyThisObject(cur_quick_frame_);
+ } else {
+ return cur_shadow_frame_->GetVRegReference(0);
+ }
} else if (m->IsOptimized(sizeof(void*))) {
// TODO: Implement, currently only used for exceptions when jdwp is enabled.
UNIMPLEMENTED(WARNING)
diff --git a/runtime/stack.h b/runtime/stack.h
index 1d772e6ae2..15007af85a 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -72,8 +72,7 @@ class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType>
: mirror::ObjectReference<false, MirrorType>(p) {}
};
-// ShadowFrame has 3 possible layouts:
-// - portable - a unified array of VRegs and references. Precise references need GC maps.
+// ShadowFrame has 2 possible layouts:
// - interpreter - separate VRegs and reference arrays. References are in the reference array.
// - JNI - just VRegs, but where every VReg holds a reference.
class ShadowFrame {
@@ -100,28 +99,11 @@ class ShadowFrame {
~ShadowFrame() {}
bool HasReferenceArray() const {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return (number_of_vregs_ & kHasReferenceArray) != 0;
-#else
return true;
-#endif
}
uint32_t NumberOfVRegs() const {
-#if defined(ART_USE_PORTABLE_COMPILER)
- return number_of_vregs_ & ~kHasReferenceArray;
-#else
return number_of_vregs_;
-#endif
- }
-
- void SetNumberOfVRegs(uint32_t number_of_vregs) {
-#if defined(ART_USE_PORTABLE_COMPILER)
- number_of_vregs_ = number_of_vregs | (number_of_vregs_ & kHasReferenceArray);
-#else
- UNUSED(number_of_vregs);
- UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
-#endif
}
uint32_t GetDexPC() const {
@@ -270,16 +252,6 @@ class ShadowFrame {
ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetMethod(mirror::ArtMethod* method) {
-#if defined(ART_USE_PORTABLE_COMPILER)
- DCHECK(method != nullptr);
- method_ = method;
-#else
- UNUSED(method);
- UNIMPLEMENTED(FATAL) << "Should only be called when portable is enabled";
-#endif
- }
-
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
return ((&References()[0] <= shadow_frame_entry_obj) &&
@@ -316,10 +288,6 @@ class ShadowFrame {
uint32_t dex_pc, bool has_reference_array)
: number_of_vregs_(num_vregs), link_(link), method_(method), dex_pc_(dex_pc) {
if (has_reference_array) {
-#if defined(ART_USE_PORTABLE_COMPILER)
- CHECK_LT(num_vregs, static_cast<uint32_t>(kHasReferenceArray));
- number_of_vregs_ |= kHasReferenceArray;
-#endif
memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
} else {
memset(vregs_, 0, num_vregs * sizeof(uint32_t));
@@ -336,13 +304,7 @@ class ShadowFrame {
return const_cast<StackReference<mirror::Object>*>(const_cast<const ShadowFrame*>(this)->References());
}
-#if defined(ART_USE_PORTABLE_COMPILER)
- constexpr uint32_t kHasReferenceArray = 1ul << 31;
- // TODO: make const in the portable case.
- uint32_t number_of_vregs_;
-#else
const uint32_t number_of_vregs_;
-#endif
// Link to previous shadow frame or NULL.
ShadowFrame* link_;
mirror::ArtMethod* method_;
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index a58ecab17d..7cc3e57895 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -19,6 +19,7 @@
#include "base/bit_vector.h"
#include "memory_region.h"
+#include "utils.h"
namespace art {
@@ -199,6 +200,11 @@ class StackMap {
&& region_.size() == other.region_.size();
}
+ static size_t ComputeAlignedStackMapSize(size_t stack_mask_size) {
+ // On ARM, the stack maps must be 4-byte aligned.
+ return RoundUp(StackMap::kFixedSize + stack_mask_size, 4);
+ }
+
private:
static constexpr int kDexPcOffset = 0;
static constexpr int kNativePcOffsetOffset = kDexPcOffset + sizeof(uint32_t);
@@ -262,7 +268,7 @@ class CodeInfo {
}
size_t StackMapSize() const {
- return StackMap::kFixedSize + GetStackMaskSize();
+ return StackMap::ComputeAlignedStackMapSize(GetStackMaskSize());
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) {
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 49b7be9edd..7aed8b033c 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -83,7 +83,9 @@ inline ThreadState Thread::SetState(ThreadState new_state) {
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
if (kIsDebugBuild) {
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (gAborting == 0) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ }
if (check_locks) {
bool bad_mutexes_held = false;
for (int i = kLockLevelCount - 1; i >= 0; --i) {
@@ -97,7 +99,9 @@ inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
}
}
}
- CHECK(!bad_mutexes_held);
+ if (gAborting == 0) {
+ CHECK(!bad_mutexes_held);
+ }
}
}
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a6e5b0c18b..2308cc9f11 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -91,7 +91,7 @@ static void UnimplementedEntryPoint() {
}
void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
+ QuickEntryPoints* qpoints);
void Thread::InitTlsEntryPoints() {
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
@@ -102,7 +102,7 @@ void Thread::InitTlsEntryPoints() {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
- &tlsPtr_.portable_entrypoints, &tlsPtr_.quick_entrypoints);
+ &tlsPtr_.quick_entrypoints);
}
void Thread::ResetQuickAllocEntryPointsForThread() {
@@ -742,7 +742,7 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
// Don't do this if we are aborting since the GC may have all the threads suspended. This will
// cause ScopedObjectAccessUnchecked to deadlock.
- if (self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
+ if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
ScopedObjectAccessUnchecked soa(self);
priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
->GetInt(thread->tlsPtr_.opeer);
@@ -1865,16 +1865,6 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
JNI_ENTRY_POINT_INFO(pDlsymLookup)
#undef JNI_ENTRY_POINT_INFO
-#define PORTABLE_ENTRY_POINT_INFO(x) \
- if (PORTABLE_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
- os << #x; \
- return; \
- }
- PORTABLE_ENTRY_POINT_INFO(pPortableImtConflictTrampoline)
- PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline)
- PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge)
-#undef PORTABLE_ENTRY_POINT_INFO
-
#define QUICK_ENTRY_POINT_INFO(x) \
if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
os << #x; \
diff --git a/runtime/thread.h b/runtime/thread.h
index 5b3e746cc9..c3a97514e5 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -31,10 +31,10 @@
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
+#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -549,12 +549,6 @@ class Thread {
}
template<size_t pointer_size>
- static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
- }
-
- template<size_t pointer_size>
static ThreadOffset<pointer_size> SelfOffset() {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
}
@@ -1128,7 +1122,6 @@ class Thread {
// TODO: move this to more of a global offset table model to avoid per-thread duplication.
InterpreterEntryPoints interpreter_entrypoints;
JniEntryPoints jni_entrypoints;
- PortableEntryPoints portable_entrypoints;
QuickEntryPoints quick_entrypoints;
// Thread-local allocation pointer.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 71325a5350..6a9111fad5 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -168,7 +168,9 @@ class DumpCheckpoint FINAL : public Closure {
const uint32_t kWaitTimeoutMs = 10000;
bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kWaitTimeoutMs);
if (timed_out) {
- LOG(kIsDebugBuild ? FATAL : ERROR) << "Unexpected time out during dump checkpoint.";
+ // Avoid a recursive abort.
+ LOG((kIsDebugBuild && (gAborting == 0)) ? FATAL : ERROR)
+ << "Unexpected time out during dump checkpoint.";
}
}
@@ -241,7 +243,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && gAborting == 0) {
CHECK_NE(self->GetState(), kRunnable);
}
@@ -502,9 +504,9 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
useconds_t total_delay_us = 0;
useconds_t delay_us = 0;
- bool did_suspend_request = false;
*timed_out = false;
Thread* self = Thread::Current();
+ Thread* suspended_thread = nullptr;
VLOG(threads) << "SuspendThreadByPeer starting";
while (true) {
Thread* thread;
@@ -518,10 +520,18 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
if (thread == nullptr) {
+ if (suspended_thread != nullptr) {
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
+ // If we incremented the suspend count but the thread reset its peer, we need to
+ // re-decrement it since it is shutting down and may deadlock the runtime in
+ // ThreadList::WaitForOtherNonDaemonThreadsToExit.
+ suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
+ }
ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
return nullptr;
}
if (!Contains(thread)) {
+ CHECK(suspended_thread == nullptr);
VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
<< reinterpret_cast<void*>(thread);
return nullptr;
@@ -536,9 +546,10 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
// which will allow this thread to be suspended.
continue;
}
- thread->ModifySuspendCount(self, +1, debug_suspension);
+ CHECK(suspended_thread == nullptr);
+ suspended_thread = thread;
+ suspended_thread->ModifySuspendCount(self, +1, debug_suspension);
request_suspension = false;
- did_suspend_request = true;
} else {
// If the caller isn't requesting suspension, a suspension should have already occurred.
CHECK_GT(thread->GetSuspendCount(), 0);
@@ -557,8 +568,9 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
}
if (total_delay_us >= kTimeoutUs) {
ThreadSuspendByPeerWarning(self, FATAL, "Thread suspension timed out", peer);
- if (did_suspend_request) {
- thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
+ if (suspended_thread != nullptr) {
+ CHECK_EQ(suspended_thread, thread);
+ suspended_thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
}
*timed_out = true;
return nullptr;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index b5108443b0..29a3b09acd 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -38,9 +38,7 @@
#include "ScopedLocalRef.h"
#include "thread.h"
#include "thread_list.h"
-#if !defined(ART_USE_PORTABLE_COMPILER)
#include "entrypoints/quick/quick_entrypoints.h"
-#endif
namespace art {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 1b3cc8ff93..81172cb83a 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -725,7 +725,16 @@ bool MethodVerifier::VerifyInstructions() {
/* Flag instructions that are garbage collection points */
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
- if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow()) {
+ if (inst->IsBranch()) {
+ insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ // The compiler also needs safepoints for fall-through to loop heads.
+ // Such a loop head must be a target of a branch.
+ int32_t offset = 0;
+ bool cond, self_ok;
+ bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
+ DCHECK(target_ok);
+ insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+ } else if (inst->IsSwitch() || inst->IsThrow()) {
insn_flags_[dex_pc].SetCompileTimeInfoPoint();
} else if (inst->IsReturn()) {
insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
@@ -2696,6 +2705,18 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::IGET_OBJECT_QUICK:
VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false);
break;
+ case Instruction::IGET_BOOLEAN_QUICK:
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true);
+ break;
+ case Instruction::IGET_BYTE_QUICK:
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true);
+ break;
+ case Instruction::IGET_CHAR_QUICK:
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true);
+ break;
+ case Instruction::IGET_SHORT_QUICK:
+ VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true);
+ break;
case Instruction::IPUT_QUICK:
VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true);
break;
@@ -2735,31 +2756,10 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
/* These should never appear during verification. */
- case Instruction::UNUSED_3E:
- case Instruction::UNUSED_3F:
- case Instruction::UNUSED_40:
- case Instruction::UNUSED_41:
- case Instruction::UNUSED_42:
- case Instruction::UNUSED_43:
+ case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
+ case Instruction::UNUSED_F3 ... Instruction::UNUSED_FF:
case Instruction::UNUSED_79:
case Instruction::UNUSED_7A:
- case Instruction::UNUSED_EF:
- case Instruction::UNUSED_F0:
- case Instruction::UNUSED_F1:
- case Instruction::UNUSED_F2:
- case Instruction::UNUSED_F3:
- case Instruction::UNUSED_F4:
- case Instruction::UNUSED_F5:
- case Instruction::UNUSED_F6:
- case Instruction::UNUSED_F7:
- case Instruction::UNUSED_F8:
- case Instruction::UNUSED_F9:
- case Instruction::UNUSED_FA:
- case Instruction::UNUSED_FB:
- case Instruction::UNUSED_FC:
- case Instruction::UNUSED_FD:
- case Instruction::UNUSED_FE:
- case Instruction::UNUSED_FF:
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Unexpected opcode " << inst->DumpString(dex_file_);
break;
@@ -3159,7 +3159,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
}
// See if the method type implied by the invoke instruction matches the access flags for the
// target method.
- if ((method_type == METHOD_DIRECT && !res_method->IsDirect()) ||
+ if ((method_type == METHOD_DIRECT && (!res_method->IsDirect() || res_method->IsStatic())) ||
(method_type == METHOD_STATIC && !res_method->IsStatic()) ||
((method_type == METHOD_VIRTUAL || method_type == METHOD_INTERFACE) && res_method->IsDirect())
) {
@@ -3408,7 +3408,7 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
return nullptr;
}
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- if (FailOrAbort(this, !Thread::Current()->IsExceptionPending(),
+ if (FailOrAbort(this, !self_->IsExceptionPending(),
"Unexpected exception pending for quickened invoke at ",
work_insn_idx_)) {
return nullptr;
@@ -3418,7 +3418,9 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst,
bool is_range) {
- DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
+ DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
+ << PrettyMethod(dex_method_idx_, *dex_file_, true) << "@" << work_insn_idx_;
+
mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
is_range);
if (res_method == nullptr) {
@@ -3900,6 +3902,10 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
DCHECK(inst->Opcode() == Instruction::IGET_QUICK ||
inst->Opcode() == Instruction::IGET_WIDE_QUICK ||
inst->Opcode() == Instruction::IGET_OBJECT_QUICK ||
+ inst->Opcode() == Instruction::IGET_BOOLEAN_QUICK ||
+ inst->Opcode() == Instruction::IGET_BYTE_QUICK ||
+ inst->Opcode() == Instruction::IGET_CHAR_QUICK ||
+ inst->Opcode() == Instruction::IGET_SHORT_QUICK ||
inst->Opcode() == Instruction::IPUT_QUICK ||
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK ||
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 0c4bf3c703..c3bd4af21a 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -238,6 +238,10 @@ class MethodVerifier {
bool HasFailures() const;
const RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst,
+ RegisterLine* reg_line,
+ bool is_range)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
// Private constructor for dumping.
@@ -586,11 +590,6 @@ class MethodVerifier {
mirror::ArtMethod* res_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line,
- bool is_range)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
mirror::ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 16338c476d..e368d2c08c 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -66,9 +66,9 @@ jmethodID WellKnownClasses::java_lang_Byte_valueOf;
jmethodID WellKnownClasses::java_lang_Character_valueOf;
jmethodID WellKnownClasses::java_lang_ClassLoader_loadClass;
jmethodID WellKnownClasses::java_lang_ClassNotFoundException_init;
-jmethodID WellKnownClasses::java_lang_Daemons_requestGC;
jmethodID WellKnownClasses::java_lang_Daemons_requestHeapTrim;
jmethodID WellKnownClasses::java_lang_Daemons_start;
+jmethodID WellKnownClasses::java_lang_Daemons_stop;
jmethodID WellKnownClasses::java_lang_Double_valueOf;
jmethodID WellKnownClasses::java_lang_Float_valueOf;
jmethodID WellKnownClasses::java_lang_Integer_valueOf;
@@ -204,9 +204,9 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V");
java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
- java_lang_Daemons_requestGC = CacheMethod(env, java_lang_Daemons, true, "requestGC", "()V");
java_lang_Daemons_requestHeapTrim = CacheMethod(env, java_lang_Daemons, true, "requestHeapTrim", "()V");
java_lang_Daemons_start = CacheMethod(env, java_lang_Daemons, true, "start", "()V");
+ java_lang_Daemons_stop = CacheMethod(env, java_lang_Daemons, true, "stop", "()V");
ScopedLocalRef<jclass> java_lang_ref_FinalizerReference(env, env->FindClass("java/lang/ref/FinalizerReference"));
java_lang_ref_FinalizerReference_add = CacheMethod(env, java_lang_ref_FinalizerReference.get(), true, "add", "(Ljava/lang/Object;)V");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index d651b90c07..1a4f0f8b85 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -77,9 +77,9 @@ struct WellKnownClasses {
static jmethodID java_lang_Character_valueOf;
static jmethodID java_lang_ClassLoader_loadClass;
static jmethodID java_lang_ClassNotFoundException_init;
- static jmethodID java_lang_Daemons_requestGC;
static jmethodID java_lang_Daemons_requestHeapTrim;
static jmethodID java_lang_Daemons_start;
+ static jmethodID java_lang_Daemons_stop;
static jmethodID java_lang_Double_valueOf;
static jmethodID java_lang_Float_valueOf;
static jmethodID java_lang_Integer_valueOf;
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 631c4be90f..40be56cc7f 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -52,11 +52,11 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
// 0025: goto 0013
- // Detaled dex instructions for ReferenceMap.java are at the end of this function.
+ // Detailed dex instructions for ReferenceMap.java are at the end of this function.
// CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
// We eliminate the non-live registers at a return, so only v3 is live.
// Note that it is OK for a compiler to not have a dex map at this dex PC because
- // a return is not a safepoint.
+ // a return is not necessarily a safepoint.
CHECK_REGS_CONTAIN_REFS(0x13U, false); // v3: y
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
@@ -68,8 +68,10 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x2cU, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x2fU, true, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
- CHECK_REGS_CONTAIN_REFS(0x32U, true, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
+ // Note that it is OK for a compiler to not have a dex map at these two dex PCs because
+ // a goto is not necessarily a safepoint.
+ CHECK_REGS_CONTAIN_REFS(0x2fU, false, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x32U, false, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
}
return true;
diff --git a/test/015-switch/expected.txt b/test/015-switch/expected.txt
index 91b47142d1..be6d2cac55 100644
--- a/test/015-switch/expected.txt
+++ b/test/015-switch/expected.txt
@@ -1,3 +1,119 @@
+packed
+default
+default
+0
+1
+2
+default
+default
+packed2
+-2
+-1
+0
+1
+2
+default
+default
+packed3
+default
+default
+default
+default
+2
+3
+4
+5
+6
+default
+default
+packed4
+default
+2147483646
+2147483647
+default
+packed5
+-2147483648
+-2147483647
+default
+packed6
+-2147483648
+default
+packed7
+default
+default
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+default
+sparse
+default
+default
+0
+1
+default
+3
+default
+default
+sparse2
+-2
+-1
+0
+default
+2
+default
+default
+sparse3
+default
+default
+default
+default
+2
+default
+4
+5
+6
+default
+default
+sparse4
+2147483645
+default
+2147483647
+default
+sparse5
+-2147483648
+default
+default
+sparse7
+default
+default
+1
+2
+default
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+default
CORRECT (one)
CORRECT (not found)
CORRECT (large)
diff --git a/test/015-switch/src/Main.java b/test/015-switch/src/Main.java
index dd97a8c60b..2a7995a075 100644
--- a/test/015-switch/src/Main.java
+++ b/test/015-switch/src/Main.java
@@ -18,7 +18,338 @@
* Test switch() blocks
*/
public class Main {
+
+ // TODO: This should be translated to smali tests, so it is guaranteed we have the right kind
+ // of switch.
+
+ // Simple packed-switch.
+ public static void packedSwitch(int value) {
+ switch (value) {
+ case 0:
+ System.out.println("0"); break;
+ case 1:
+ System.out.println("1"); break;
+ case 2:
+ System.out.println("2"); break;
+ case 3:
+ System.out.println("3"); break;
+ case 4:
+ System.out.println("4"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple packed-switch starting at a negative index.
+ public static void packedSwitch2(int value) {
+ switch (value) {
+ case -3:
+ System.out.println("-3"); break;
+ case -2:
+ System.out.println("-2"); break;
+ case -1:
+ System.out.println("-1"); break;
+ case 0:
+ System.out.println("0"); break;
+ case 1:
+ System.out.println("1"); break;
+ case 2:
+ System.out.println("2"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple packed-switch starting above 0.
+ public static void packedSwitch3(int value) {
+ switch (value) {
+ case 2:
+ System.out.println("2"); break;
+ case 3:
+ System.out.println("3"); break;
+ case 4:
+ System.out.println("4"); break;
+ case 5:
+ System.out.println("5"); break;
+ case 6:
+ System.out.println("6"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple packed-switch going up to max_int.
+ public static void packedSwitch4(int value) {
+ switch (value) {
+ case Integer.MAX_VALUE - 1:
+ System.out.println(Integer.MAX_VALUE - 1); break;
+ case Integer.MAX_VALUE:
+ System.out.println(Integer.MAX_VALUE); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple packed-switch starting at min_int.
+ public static void packedSwitch5(int value) {
+ switch (value) {
+ case Integer.MIN_VALUE:
+ System.out.println(Integer.MIN_VALUE); break;
+ case Integer.MIN_VALUE + 1:
+ System.out.println(Integer.MIN_VALUE + 1); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple (packed-)switch with only min_int.
+ public static void packedSwitch6(int value) {
+ switch (value) {
+ case Integer.MIN_VALUE:
+ System.out.println(Integer.MIN_VALUE); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Long packed-switch that might lead to not creating chained-ifs.
+ public static void packedSwitch7(int value) {
+ switch (value) {
+ case 1:
+ System.out.println(1); break;
+ case 2:
+ System.out.println(2); break;
+ case 3:
+ System.out.println(3); break;
+ case 4:
+ System.out.println(4); break;
+ case 5:
+ System.out.println(5); break;
+ case 6:
+ System.out.println(6); break;
+ case 7:
+ System.out.println(7); break;
+ case 8:
+ System.out.println(8); break;
+ case 9:
+ System.out.println(9); break;
+ case 10:
+ System.out.println(10); break;
+ case 11:
+ System.out.println(11); break;
+ case 12:
+ System.out.println(12); break;
+ case 13:
+ System.out.println(13); break;
+ case 14:
+ System.out.println(14); break;
+ case 15:
+ System.out.println(15); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Sparse switch, just leave a gap.
+ public static void sparseSwitch(int value) {
+ switch (value) {
+ case 0:
+ System.out.println("0"); break;
+ case 1:
+ System.out.println("1"); break;
+ case 3:
+ System.out.println("3"); break;
+ case 4:
+ System.out.println("4"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple sparse-switch starting at a negative index.
+ public static void sparseSwitch2(int value) {
+ switch (value) {
+ case -3:
+ System.out.println("-3"); break;
+ case -2:
+ System.out.println("-2"); break;
+ case -1:
+ System.out.println("-1"); break;
+ case 0:
+ System.out.println("0"); break;
+ case 2:
+ System.out.println("2"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple sparse-switch starting above 0.
+ public static void sparseSwitch3(int value) {
+ switch (value) {
+ case 2:
+ System.out.println("2"); break;
+ case 4:
+ System.out.println("4"); break;
+ case 5:
+ System.out.println("5"); break;
+ case 6:
+ System.out.println("6"); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple sparse-switch going up to max_int.
+ public static void sparseSwitch4(int value) {
+ switch (value) {
+ case Integer.MAX_VALUE - 2:
+ System.out.println(Integer.MAX_VALUE - 2); break;
+ case Integer.MAX_VALUE:
+ System.out.println(Integer.MAX_VALUE); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Simple sparse-switch starting at min_int.
+ public static void sparseSwitch5(int value) {
+ switch (value) {
+ case Integer.MIN_VALUE:
+ System.out.println(Integer.MIN_VALUE); break;
+ case Integer.MIN_VALUE + 2:
+ System.out.println(Integer.MIN_VALUE + 2); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
+ // Long sparse-switch that might lead to not creating chained-ifs.
+ public static void sparseSwitch7(int value) {
+ switch (value) {
+ case 1:
+ System.out.println(1); break;
+ case 2:
+ System.out.println(2); break;
+ case 4:
+ System.out.println(4); break;
+ case 5:
+ System.out.println(5); break;
+ case 6:
+ System.out.println(6); break;
+ case 7:
+ System.out.println(7); break;
+ case 8:
+ System.out.println(8); break;
+ case 9:
+ System.out.println(9); break;
+ case 10:
+ System.out.println(10); break;
+ case 11:
+ System.out.println(11); break;
+ case 12:
+ System.out.println(12); break;
+ case 13:
+ System.out.println(13); break;
+ case 14:
+ System.out.println(14); break;
+ case 15:
+ System.out.println(15); break;
+ default:
+ System.out.println("default"); break;
+ }
+ }
+
public static void main(String args[]) {
+ /*
+ * Note: We are using for loops and calls to hopefully avoid simplifying the switch
+ * structure from constant propagation. When inlining is supported, this needs to
+ * be revisited.
+ */
+
+ System.out.println("packed");
+ for (int i = -2; i < 3; i++) {
+ packedSwitch(i);
+ }
+ packedSwitch(Integer.MIN_VALUE);
+ packedSwitch(Integer.MAX_VALUE);
+
+ System.out.println("packed2");
+ for (int i = -2; i < 3; i++) {
+ packedSwitch2(i);
+ }
+ packedSwitch2(Integer.MIN_VALUE);
+ packedSwitch2(Integer.MAX_VALUE);
+
+ System.out.println("packed3");
+ for (int i = -2; i < 7; i++) {
+ packedSwitch3(i);
+ }
+ packedSwitch3(Integer.MIN_VALUE);
+ packedSwitch3(Integer.MAX_VALUE);
+
+ System.out.println("packed4");
+ for (int i = Integer.MAX_VALUE - 2; i > 0; i++) {
+ packedSwitch4(i);
+ }
+ packedSwitch4(Integer.MIN_VALUE);
+
+ System.out.println("packed5");
+ for (int i = Integer.MIN_VALUE; i < Integer.MIN_VALUE + 2; i++) {
+ packedSwitch5(i);
+ }
+ packedSwitch5(Integer.MAX_VALUE);
+
+ System.out.println("packed6");
+ packedSwitch6(Integer.MIN_VALUE);
+ packedSwitch6(Integer.MAX_VALUE);
+
+ System.out.println("packed7");
+ for (int i = -1; i < 17; i++) {
+ packedSwitch7(i);
+ }
+
+
+ System.out.println("sparse");
+ for (int i = -2; i < 4; i++) {
+ sparseSwitch(i);
+ }
+ sparseSwitch(Integer.MIN_VALUE);
+ sparseSwitch(Integer.MAX_VALUE);
+
+ System.out.println("sparse2");
+ for (int i = -2; i < 3; i++) {
+ sparseSwitch2(i);
+ }
+ sparseSwitch2(Integer.MIN_VALUE);
+ sparseSwitch2(Integer.MAX_VALUE);
+
+ System.out.println("sparse3");
+ for (int i = -2; i < 7; i++) {
+ sparseSwitch3(i);
+ }
+ sparseSwitch3(Integer.MIN_VALUE);
+ sparseSwitch3(Integer.MAX_VALUE);
+
+ System.out.println("sparse4");
+ for (int i = Integer.MAX_VALUE - 2; i > 0; i++) {
+ sparseSwitch4(i);
+ }
+ sparseSwitch4(Integer.MIN_VALUE);
+
+ System.out.println("sparse5");
+ for (int i = Integer.MIN_VALUE; i < Integer.MIN_VALUE + 2; i++) {
+ sparseSwitch5(i);
+ }
+ sparseSwitch5(Integer.MAX_VALUE);
+
+ System.out.println("sparse7");
+ for (int i = -1; i < 17; i++) {
+ sparseSwitch7(i);
+ }
+
+ // Older tests.
+
int a = 1;
switch (a) {
diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java
index ae105761f4..cd5130d127 100644
--- a/test/109-suspend-check/src/Main.java
+++ b/test/109-suspend-check/src/Main.java
@@ -21,10 +21,15 @@ public class Main {
System.out.println("Running (" + TEST_TIME + " seconds) ...");
InfiniteForLoop forLoop = new InfiniteForLoop();
InfiniteWhileLoop whileLoop = new InfiniteWhileLoop();
+ InfiniteWhileLoopWithIntrinsic whileLoopWithIntrinsic =
+ new InfiniteWhileLoopWithIntrinsic();
+ InfiniteDoWhileLoopWithLong doWhileLoopWithLong = new InfiniteDoWhileLoopWithLong();
InfiniteDoWhileLoop doWhileLoop = new InfiniteDoWhileLoop();
MakeGarbage garbage = new MakeGarbage();
forLoop.start();
whileLoop.start();
+ whileLoopWithIntrinsic.start();
+ doWhileLoopWithLong.start();
doWhileLoop.start();
garbage.start();
for (int i = 0; i < TEST_TIME; i++) {
@@ -34,6 +39,8 @@ public class Main {
}
forLoop.stopNow();
whileLoop.stopNow();
+ whileLoopWithIntrinsic.stopNow();
+ doWhileLoopWithLong.stopNow();
doWhileLoop.stopNow();
garbage.stopNow();
System.out.println("Done.");
@@ -48,6 +55,35 @@ public class Main {
}
}
+class InfiniteWhileLoopWithIntrinsic extends Thread {
+ volatile private boolean keepGoing = true;
+ private String[] strings = { "a", "b", "c", "d" };
+ private int sum = 0;
+ public void run() {
+ int i = 0;
+ while (keepGoing) {
+ i++;
+ sum += strings[i & 3].length();
+ }
+ }
+ public void stopNow() {
+ keepGoing = false;
+ }
+}
+
+class InfiniteDoWhileLoopWithLong extends Thread {
+ volatile private long keepGoing = 7L;
+ public void run() {
+ int i = 0;
+ do {
+ i++;
+ } while (keepGoing >= 4L);
+ }
+ public void stopNow() {
+ keepGoing = 1L;
+ }
+}
+
class InfiniteWhileLoop extends Thread {
volatile private boolean keepGoing = true;
public void run() {
diff --git a/test/118-noimage-dex2oat/expected.txt b/test/118-noimage-dex2oat/expected.txt
index 6825fae41f..bcb695d232 100644
--- a/test/118-noimage-dex2oat/expected.txt
+++ b/test/118-noimage-dex2oat/expected.txt
@@ -1,6 +1,9 @@
Run -Xnoimage-dex2oat
Has image is false, is image dex2oat enabled is false, is BOOTCLASSPATH on disk is false.
+testB18485243 PASS
Run -Ximage-dex2oat
Has image is true, is image dex2oat enabled is true, is BOOTCLASSPATH on disk is true.
+testB18485243 PASS
Run default
Has image is true, is image dex2oat enabled is true, is BOOTCLASSPATH on disk is true.
+testB18485243 PASS
diff --git a/test/118-noimage-dex2oat/smali/b_18485243.smali b/test/118-noimage-dex2oat/smali/b_18485243.smali
new file mode 100644
index 0000000000..41fbc74678
--- /dev/null
+++ b/test/118-noimage-dex2oat/smali/b_18485243.smali
@@ -0,0 +1,22 @@
+.class public LB18485243;
+.super Ljava/lang/Object;
+.source "b_18485243.smali"
+
+.method public constructor <init>()V
+ .registers 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method private static toInt()I
+ .registers 1
+ const v0, 0
+ return v0
+.end method
+
+.method public run()I
+ .registers 3
+ invoke-direct {p0}, LB18485243;->toInt()I
+ move-result v0
+ return v0
+.end method
diff --git a/test/118-noimage-dex2oat/src/Main.java b/test/118-noimage-dex2oat/src/Main.java
index c83b84de60..9bf5bb3b08 100644
--- a/test/118-noimage-dex2oat/src/Main.java
+++ b/test/118-noimage-dex2oat/src/Main.java
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
public class Main {
@@ -36,6 +37,8 @@ public class Main {
} else if (!hasImage && isBootClassPathOnDisk) {
throw new Error("Image with dex2oat enabled runs without an image file");
}
+
+ testB18485243();
}
static {
@@ -67,4 +70,19 @@ public class Main {
return (boolean) isBootClassPathOnDiskMethod.invoke(null, instructionSet);
}
}
+
+ private static void testB18485243() throws Exception {
+ Class<?> k = Class.forName("B18485243");
+ Object o = k.newInstance();
+ Method m = k.getDeclaredMethod("run");
+ try {
+ m.invoke(o);
+ } catch (InvocationTargetException e) {
+ Throwable actual = e.getTargetException();
+ if (!(actual instanceof IncompatibleClassChangeError)) {
+ throw new AssertionError("Expected IncompatibleClassChangeError", actual);
+ }
+ }
+ System.out.println("testB18485243 PASS");
+ }
}
diff --git a/test/129-ThreadGetId/expected.txt b/test/129-ThreadGetId/expected.txt
new file mode 100644
index 0000000000..134d8d0b47
--- /dev/null
+++ b/test/129-ThreadGetId/expected.txt
@@ -0,0 +1 @@
+Finishing
diff --git a/test/129-ThreadGetId/info.txt b/test/129-ThreadGetId/info.txt
new file mode 100644
index 0000000000..443062db96
--- /dev/null
+++ b/test/129-ThreadGetId/info.txt
@@ -0,0 +1 @@
+Regression test for b/18661622
diff --git a/test/129-ThreadGetId/src/Main.java b/test/129-ThreadGetId/src/Main.java
new file mode 100644
index 0000000000..9934bba95f
--- /dev/null
+++ b/test/129-ThreadGetId/src/Main.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Map;
+
+public class Main implements Runnable {
+ static final int numberOfThreads = 5;
+ static final int totalOperations = 1000;
+
+ public static void main(String[] args) throws Exception {
+ final Thread[] threads = new Thread[numberOfThreads];
+ for (int t = 0; t < threads.length; t++) {
+ threads[t] = new Thread(new Main());
+ threads[t].start();
+ }
+ for (Thread t : threads) {
+ t.join();
+ }
+ System.out.println("Finishing");
+ }
+
+ public void test_getId() {
+ if (Thread.currentThread().getId() <= 0) {
+ System.out.println("current thread's ID is not positive");
+ }
+ // Check all the current threads for positive IDs.
+ Map<Thread, StackTraceElement[]> stMap = Thread.getAllStackTraces();
+ for (Thread thread : stMap.keySet()) {
+ if (thread.getId() <= 0) {
+ System.out.println("thread's ID is not positive: " + thread.getName());
+ }
+ }
+ }
+
+ public void run() {
+ for (int i = 0; i < totalOperations; ++i) {
+ test_getId();
+ }
+ }
+}
diff --git a/test/436-shift-constant/expected.txt b/test/436-shift-constant/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/436-shift-constant/expected.txt
diff --git a/test/436-shift-constant/info.txt b/test/436-shift-constant/info.txt
new file mode 100644
index 0000000000..dc206461b0
--- /dev/null
+++ b/test/436-shift-constant/info.txt
@@ -0,0 +1 @@
+Regression tests for shift instructions and constants larger than 8bits.
diff --git a/runtime/arch/arm64/portable_entrypoints_arm64.S b/test/436-shift-constant/src/Main.java
index 9e2c030d71..e69f64bfc2 100644
--- a/runtime/arch/arm64/portable_entrypoints_arm64.S
+++ b/test/436-shift-constant/src/Main.java
@@ -14,17 +14,29 @@
* limitations under the License.
*/
-#include "asm_support_arm64.S"
+public class Main {
+ public static void main(String[] args) {
+ assertEquals(0x80000000, doShiftInt(1));
+ assertEquals(0x8000000000000000L, doShiftLong(1L));
+ }
- /*
- * Portable invocation stub.
- */
-UNIMPLEMENTED art_portable_invoke_stub
+ public static int doShiftInt(int value) {
+ return value << 0xFFFF;
+ }
-UNIMPLEMENTED art_portable_proxy_invoke_handler
+ public static long doShiftLong(long value) {
+ return value << 0xFFFF;
+ }
-UNIMPLEMENTED art_portable_resolution_trampoline
+ public static void assertEquals(int a, int b) {
+ if (a != b) {
+ throw new Error("Expected " + a + ", got " + b);
+ }
+ }
-UNIMPLEMENTED art_portable_to_interpreter_bridge
-
-UNIMPLEMENTED art_portable_imt_conflict_trampoline
+ public static void assertEquals(long a, long b) {
+ if (a != b) {
+ throw new Error("Expected " + a + ", got " + b);
+ }
+ }
+}
diff --git a/test/437-inline/expected.txt b/test/437-inline/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/437-inline/expected.txt
diff --git a/test/437-inline/info.txt b/test/437-inline/info.txt
new file mode 100644
index 0000000000..6487a21a36
--- /dev/null
+++ b/test/437-inline/info.txt
@@ -0,0 +1 @@
+Tests inlining in the compiler.
diff --git a/test/437-inline/src/Main.java b/test/437-inline/src/Main.java
new file mode 100644
index 0000000000..ccddab757e
--- /dev/null
+++ b/test/437-inline/src/Main.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ if ($opt$inline$returnInt() != 4) {
+ throw new Error();
+ }
+
+ if ($opt$inline$returnParameter(42) != 42) {
+ throw new Error();
+ }
+
+ if ($opt$inline$returnWide() != 12L) {
+ throw new Error();
+ }
+
+ if ($opt$inline$returnWideParameter(0x100000001L) != 0x100000001L) {
+ throw new Error();
+ }
+
+ if ($opt$inline$returnReferenceParameter(Main.class) != Main.class) {
+ throw new Error();
+ }
+
+ $opt$inline$returnVoid();
+ $opt$inline$returnVoidWithOneParameter(32);
+
+ if ($opt$inline$returnAdd(42, 1) != 43) {
+ throw new Error();
+ }
+
+ if ($opt$inline$returnSub(42, 1) != 41) {
+ throw new Error();
+ }
+ }
+
+ public static int $opt$inline$returnParameter(int a) {
+ return a;
+ }
+
+ public static int $opt$inline$returnAdd(int a, int b) {
+ return a + b;
+ }
+
+ public static int $opt$inline$returnSub(int a, int b) {
+ return a - b;
+ }
+
+ public static int $opt$inline$returnInt() {
+ return 4;
+ }
+
+ public static long $opt$inline$returnWideParameter(long a) {
+ return a;
+ }
+
+ public static long $opt$inline$returnWide() {
+ return 12L;
+ }
+
+ public static Object $opt$inline$returnReferenceParameter(Object o) {
+ return o;
+ }
+
+ public static void $opt$inline$returnVoid() {
+ return;
+ }
+
+ public static void $opt$inline$returnVoidWithOneParameter(int a) {
+ return;
+ }
+}
diff --git a/test/704-multiply-accumulate/expected.txt b/test/704-multiply-accumulate/expected.txt
new file mode 100644
index 0000000000..76f5a5a5aa
--- /dev/null
+++ b/test/704-multiply-accumulate/expected.txt
@@ -0,0 +1 @@
+Done!
diff --git a/test/704-multiply-accumulate/info.txt b/test/704-multiply-accumulate/info.txt
new file mode 100644
index 0000000000..a12fd444ee
--- /dev/null
+++ b/test/704-multiply-accumulate/info.txt
@@ -0,0 +1 @@
+Tests for multiply accumulate operations.
diff --git a/test/704-multiply-accumulate/src/Main.java b/test/704-multiply-accumulate/src/Main.java
new file mode 100644
index 0000000000..7404b9b28d
--- /dev/null
+++ b/test/704-multiply-accumulate/src/Main.java
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ static int imax = Integer.MAX_VALUE;
+ static int imin = Integer.MIN_VALUE;
+ static long lmax = Long.MAX_VALUE;
+ static long lmin = Long.MIN_VALUE;
+ static CA ca;
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void test_int() {
+ int result = 0;
+ int a = imax;
+ int b = imin;
+ int c = 10;
+ int d = c;
+ int tmp = 0;
+ int [] ia = new int[5];
+ for (int i = 0; i < 100; i++) {
+ tmp = i*c;
+ result += i*i;
+ result = i - tmp;
+ }
+ expectEquals(result, -891);
+
+ result = c*c + (result - c);
+ expectEquals(result, -801);
+
+ result = a + a*a;
+ expectEquals(result, -2147483648);
+
+ result = b + b*b;
+ expectEquals(result, -2147483648);
+
+ result = b - a*a;
+ expectEquals(result, 2147483647);
+
+ result = d*d;
+ d++;
+ result += result;
+ expectEquals(result, 200);
+
+ result = c*c;
+ tmp++;
+ result += result;
+ expectEquals(result, 200);
+
+ result = 0;
+ try {
+ result = c*c;
+ ia[c] = d; // array out of bound.
+ result += d;
+ } catch (Exception e) {
+ }
+ expectEquals(result, 100);
+
+ CA obj = new CA();
+ result = a*c + obj.ia;
+ expectEquals(result, 2);
+
+ result = 0;
+ obj = ca;
+ try {
+ result = a*c;
+ tmp = obj.ia;
+ result = result + tmp;
+ } catch (Exception e) {
+ }
+ expectEquals(result, -10);
+ }
+
+ public static void test_long() {
+ long result = 0;
+ long a = lmax;
+ long b = lmin;
+ long c = 10;
+ long d = c;
+ long tmp = 0;
+ int [] ia = new int[5];
+ for (long i = 0; i < 100; i++) {
+ tmp = i*c;
+ result += i*i;
+ result = i - tmp;
+ }
+ expectEquals(result, -891L);
+
+ result = c*c + (result - c);
+ expectEquals(result, -801L);
+
+ result = a + a*a;
+ expectEquals(result, -9223372036854775808L);
+
+ result = b + b*b;
+ expectEquals(result, -9223372036854775808L);
+
+ result = b - a*a;
+ expectEquals(result, 9223372036854775807L);
+
+ result = d*d;
+ d++;
+ result += result;
+ expectEquals(result, 200L);
+
+ result = c*c;
+ tmp++;
+ result += result;
+ expectEquals(result, 200L);
+
+ result = 0;
+ int index = 10;
+ try {
+ result = c*c;
+ ia[index] = 10; // array out of bound.
+ result += d;
+ } catch (Exception e) {
+ }
+ expectEquals(result, 100L);
+
+ CA obj = new CA();
+ result = a*c + obj.la;
+ expectEquals(result, 113L);
+
+ result = 0;
+ obj = ca;
+ try {
+ result = a*c;
+ tmp = obj.la;
+ result = result + tmp;
+ } catch (Exception e) {
+ }
+ expectEquals(result, -10L);
+ }
+
+ public static void main(String[] args) {
+ test_int();
+ test_long();
+ System.out.println("Done!");
+ }
+
+}
+
+class CA {
+ public int ia = 12;
+ public long la = 123L;
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index c3fec5d412..e085d3f470 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -298,32 +298,9 @@ TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
# Known broken tests for the arm64 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
- 003-omnibus-opcodes \
- 004-ReferenceMap \
- 005-annotations \
- 009-instanceof \
- 010-instance \
- 023-many-interfaces \
- 044-proxy \
- 045-reflect-array \
- 046-reflect \
- 047-returns \
- 062-character-encodings \
- 063-process-manager \
- 068-classloader \
- 069-field-type \
- 071-dexfile \
- 106-exceptions2 \
- 107-int-math2 \
- 201-built-in-exception-detail-messages \
- 407-arrays \
- 412-new-array \
- 422-instanceof \
- 424-checkcast \
- 427-bounds \
- 430-live-register-slow-path \
- 436-rem-float \
- 800-smali \
+ 003-omnibus-opcodes64 \
+ 012-math64 \
+ 436-rem-float64
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/tools/art b/tools/art
index d5d546b4ad..2408f9fe98 100644
--- a/tools/art
+++ b/tools/art
@@ -76,10 +76,6 @@ ANDROID_ROOT=$PROG_DIR/..
LIBDIR=$(find_libdir)
LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
-if [ z"$PERF" != z ]; then
- invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
-fi
-
DELETE_ANDROID_DATA=false
# If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
# and ensure we delete it at the end.
@@ -89,6 +85,10 @@ if [ "$ANDROID_DATA" = "/data" ] || [ "$ANDROID_DATA" = "" ]; then
DELETE_ANDROID_DATA=true
fi
+if [ z"$PERF" != z ]; then
+ invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
+fi
+
ANDROID_DATA=$ANDROID_DATA \
ANDROID_ROOT=$ANDROID_ROOT \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 3dc5e7161b..6f9911d061 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -27,5 +27,16 @@
names: ["libcore.java.lang.OldSystemTest#test_getProperties",
"org.apache.harmony.tests.java.lang.Process2Test#test_getErrorStream",
"org.apache.harmony.tests.java.lang.ProcessTest#test_exitValue"]
+},
+{
+ description: "Failures needing investigation",
+ result: EXEC_FAILED,
+ modes: [device],
+ names: ["libcore.java.util.TimeZoneTest#testDisplayNames",
+ "libcore.java.util.TimeZoneTest#test_useDaylightTime_Taiwan",
+ "org.apache.harmony.tests.java.util.DateTest#test_Constructor",
+ "org.apache.harmony.tests.java.util.ScannerTest#test_Constructor_LReadableByteChannel",
+ "org.apache.harmony.tests.java.util.TimeZoneTest#test_hasSameRules_Ljava_util_TimeZone",
+ "libcore.java.util.TimeZoneTest#testAllDisplayNames"]
}
]